from ...java import opcodes as JavaOpcodes
from .primitives import ICONST_val
##########################################################################
# Common Java operations
##########################################################################
class New:
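    # Emit NEW plus DUP: NEW pushes an uninitialized reference, and the
    # duplicated copy is what the later <init> call (see Init) consumes,
    # leaving one initialized reference on the stack.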
def __init__(self, classname):
self.classname = classname
def process(self, context):
context.add_opcodes(
JavaOpcodes.NEW(self.classname),
JavaOpcodes.DUP()
)
class Init:
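    # Invoke the <init> constructor of classname; *args are the JVM type
    # descriptors of the constructor arguments already on the stack.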
def __init__(self, classname, *args):
self.classname = classname
self.args = args
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKESPECIAL(
self.classname,
'<init>',
args=self.args,
returns='V'
),
)
class Yield:
def __init__(self, yield_point):
self.yield_point = yield_point
def process(self, context):
context.add_opcodes(
ICONST_val(self.yield_point),
JavaOpcodes.INVOKEVIRTUAL(
'org/python/types/Generator',
'yield',
args=['Ljava/util/Map;', 'I'],
returns='V'
),
# "yield" by returning from the generator method.
JavaOpcodes.ARETURN()
)
##########################################################################
# Java types and their operations
##########################################################################
class Array:
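    # Build a Java object array (ANEWARRAY) of the given size and element
    # class; if given, `fill` is an opcode that pushes the value stored
    # into every slot.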
def __init__(self, size, classname='org/python/Object', fill=None):
self.size = size
self.classname = classname
self.fill = fill
def process(self, context):
context.add_opcodes(
ICONST_val(self.size),
JavaOpcodes.ANEWARRAY(self.classname),
)
if self.fill:
for i in range(self.size):
context.add_opcodes(
JavaOpcodes.DUP(),
ICONST_val(i),
self.fill,
JavaOpcodes.AASTORE(),
)
class List:
def __init__(self, size=None):
self.size = size
def process(self, context):
context.add_opcodes(
JavaOpcodes.NEW('java/util/ArrayList'),
JavaOpcodes.DUP(),
)
if self.size:
context.add_opcodes(
ICONST_val(self.size),
Init('java/util/ArrayList', 'I')
)
else:
context.add_opcodes(
Init('java/util/ArrayList')
)
class add:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'java/util/List',
'add',
args=['Ljava/lang/Object;'],
returns='Z'
),
JavaOpcodes.POP(),
)
class Map:
def process(self, context):
context.add_opcodes(
JavaOpcodes.NEW('java/util/HashMap'),
JavaOpcodes.DUP(),
Init('java/util/HashMap')
)
class get:
def __init__(self, key):
self.key = key
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.key),
JavaOpcodes.INVOKEINTERFACE(
'java/util/Map',
'get',
args=['Ljava/lang/Object;'],
returns='Ljava/lang/Object;'
)
)
class put:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'java/util/Map',
'put',
args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],
returns='Ljava/lang/Object;'
),
JavaOpcodes.POP()
)
class putAll:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'java/util/Map',
'putAll',
args=['Ljava/util/Map;'],
returns='V'
),
)
class Class:
class forName:
def __init__(self, classname):
self.classname = classname
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.classname),
JavaOpcodes.INVOKESTATIC(
'java/lang/Class',
'forName',
args=['Ljava/lang/String;'],
returns='Ljava/lang/Class;'
),
)
class THROW:
# Raise an exception of given type with given arguments
# Example:
# THROW(
# 'org/python/exceptions/AttributeError',
# ['Ljava/lang/String;', JavaOpcodes.LDC_W("Invalid attribute")],
# )
def __init__(self, exception_class, *exception_args):
self.exception_class = exception_class
self.exc_arg_types = [e[0] for e in exception_args]
self.exc_arg_values = [e[1] for e in exception_args]
def process(self, context):
context.add_opcodes(
New(self.exception_class),
*self.exc_arg_values
)
context.add_opcodes(
Init(self.exception_class, *self.exc_arg_types),
JavaOpcodes.ATHROW(),
)
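# A minimal sketch (an illustrative addition, not part of the module) of how
# these helpers are driven: each is a deferred opcode group whose
# process(context) appends JVM opcodes to a code-generation context.
# _StubContext is a hypothetical stand-in for VOC's real context and simply
# records whatever it is given.
class _StubContext:
    def __init__(self):
        self.opcodes = []

    def add_opcodes(self, *ops):
        self.opcodes.extend(ops)


ctx = _StubContext()
New('java/util/ArrayList').process(ctx)    # appends NEW + DUP
Init('java/util/ArrayList').process(ctx)   # appends the <init> invocation
print(ctx.opcodes)                         # three JavaOpcodes instances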
from google.cloud import pubsub_v1
import os
from flask import Flask, request, jsonify
from gcloud import storage
import json
import datetime
import time
app = Flask(__name__)
os.environ[
"GOOGLE_APPLICATION_CREDENTIALS"] = "/home/vishvesh/Documents/Dal/serverless/api-8566414966874230052-395627-4fca061a25a4.json"
project_id = "api-8566414966874230052-395627"
client = storage.Client()
bucket = client.get_bucket('publisher_files')
@app.route('/publish', methods=['GET', 'POST'])
def publish():
topic_user = request.args.get('touser')
sub_user = request.args.get('fromuser')
subscription_id = sub_user
msg = request.args.get('msg')
topic_id = topic_user
publisher = pubsub_v1.PublisherClient()
topic_name = 'projects/{project_id}/topics/{topic}'.format(
project_id=project_id,
topic=topic_id, # Set this to something appropriate.
)
try:
pub = publisher.create_topic(topic_name)
print("created pub", pub)
except Exception as e:
print(e,"------e------")
pass
subscriber = pubsub_v1.SubscriberClient()
topic_name = 'projects/{project_id}/topics/{topic}'.format(
project_id=project_id,
topic=topic_id,
)
subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(
project_id=project_id,
sub=subscription_id,
)
try:
sub = subscriber.create_subscription(
name=subscription_name, topic=topic_name)
print("created", sub)
except Exception as e:
print(e, "--------e----------")
pass
pub_msg = publisher.publish(topic_name, str.encode(msg))
print("msg sent",pub_msg)
data = {"msg": str(msg)}
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
name = topic_user + str(st)
    with open(name + ".json", 'w') as f:
        json.dump(data, f)
blob = bucket.blob(name + ".json")
blob.upload_from_filename(name + ".json")
blob.make_public()
os.remove(name + ".json")
return str(data)
# SUBSCRIBER
@app.route('/subscribe', methods=['GET', 'POST'])
def subscribe():
topic_user = request.args.get('touser')
sub_user = request.args.get('fromuser')
topic_id = topic_user
subscription_id = sub_user
msg_list = []
subscriber = pubsub_v1.SubscriberClient()
# topic_name = 'projects/{project_id}/topics/{topic}'.format(
# project_id=project_id,
# topic=topic_id,
# )
subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(
project_id=project_id,
sub=subscription_id, # Set this to something appropriate.
)
#
# try:
# sub = subscriber.create_subscription(
# name=subscription_name, topic=topic_name)
# print("created", sub)
# except Exception as e:
# print(e,"--------e----------")
# pass
def callback(message):
print(message.data)
msg_list.append(message.data.decode('utf-8'))
message.ack()
future = subscriber.subscribe(subscription_name, callback)
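    # Let the streaming pull run for up to 4 seconds; when result() times
    # out, the pull is cancelled and whatever was collected is returned.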
try:
f = future.result(timeout=4.0)
print(f,type(f))
except Exception as e:
future.cancel()
pass
# subscriber.close()
return jsonify(msg_list)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0',
port=int(os.environ.get(
'PORT', 8080)))
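# A hedged smoke test for the two endpoints above, assuming the app runs
# locally on port 8080 (the default in the __main__ block). The parameter
# names come from the handlers; `requests` and the example values are
# illustrative additions, not part of the app.
import requests

BASE = 'http://localhost:8080'
# Publish a message on the topic named after the recipient.
print(requests.get(BASE + '/publish',
                   params={'touser': 'alice', 'fromuser': 'bob',
                           'msg': 'hello'}).text)
# Pull whatever arrived on the 'bob' subscription within the 4-second window.
print(requests.get(BASE + '/subscribe',
                   params={'touser': 'alice', 'fromuser': 'bob'}).json())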
## Problem 10 «The number of even elements of the sequence» (Medium)
## Statement
## Determine the number of even elements in the sequence ending with the number 0.
a = True
i = 0
while a:
    x = int(input())
    if x != 0:
        if x % 2 == 0:
            i = i + 1
    else:
        a = False
print(i)
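# For example, with the (hypothetical) input 2, 7, 4, 0 the program prints 2:
# the even elements 2 and 4 are counted, and the terminating 0 is not counted.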
import pynucastro as pyna
rl = pyna.ReacLibLibrary()
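# Keep only the ReacLib rates that link the nuclei below (a CNO network
# plus hot-CNO/breakout extras), with reverse rates excluded.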
h_burn = rl.linking_nuclei(["h1", "he4",
"c12", "c13",
"n13", "n14", "n15",
"o14", "o15", "o16","o17","o18",
"f17", "f18","f19",
"ne18", "ne19", "ne20",
"mg22", "mg24"],
with_reverse=False)
rc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=["fe56"])
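# Write the C++ network sources for the StarKiller/Microphysics framework.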
rc.write_network()
comp = pyna.Composition(rc.get_nuclei())
comp.set_solar_like()
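# Plot the network twice: a plain version, and a rotated variant that
# highlights exothermic (Q > 0) rates, uses curved edges, and passes
# hide_xalpha=True to suppress some he4 connections for readability.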
rc.plot(outfile="cno_extras.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13])
rc.plot(outfile="cno_extras_hide_alpha.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13],
rotated=True, highlight_filter_function=lambda r: r.Q > 0,
curved_edges=True, hide_xalpha=True)
"""
Python Challenge - Level 1 - What about making trans?
"""
import string
#import requests
#res = requests.get('http://www.pythonchallenge.com/pc/def/map.html')
#res.raise_for_status()
#print(res.text)
INPUT_TEXT = string.ascii_lowercase # abcdefghijklmnopqrstuvwxyz
OUTPUT_TEXT = INPUT_TEXT[2:]+INPUT_TEXT[:2] # cdefghijklmnopqrstuvwxyzab
TRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)
CYPHER_TEXT = """g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \
amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \
rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \
ynnjw ml rfc spj."""
#print(CYPHER_TEXT.translate(TRANSLATION_TABLE))
# The encrypted text told us to apply the same translation to the url
#print('map'.translate(TRANSLATION_TABLE)) # solution here
# Success, let's print out the next level url
print('http://www.pythonchallenge.com/pc/def/ocr.html')
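For reference, the commented-out lines above are how the answer falls out: the table shifts every letter forward by two, so 'map' becomes 'ocr'. A minimal demonstration using the same table:

# m -> o, a -> c, p -> r, hence the /pc/def/ocr.html URL printed above
print(CYPHER_TEXT.translate(TRANSLATION_TABLE))   # decodes the hint text
print('map'.translate(TRANSLATION_TABLE))         # prints: ocr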
|
normal
|
{
"blob_id": "3c03f71ef9de8825ecd7c89208c79f43c9fb7a56",
"index": 9594,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-3": "<mask token>\nINPUT_TEXT = string.ascii_lowercase\nOUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = (\n \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\n )\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-4": "<mask token>\nimport string\nINPUT_TEXT = string.ascii_lowercase\nOUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = (\n \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\n )\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-5": "\"\"\"\nPython Challenge - Level 1 - What about making trans?\n\"\"\"\nimport string\n#import requests\n#res = requests.get('http://www.pythonchallenge.com/pc/def/map.html')\n#res.raise_for_status()\n#print(res.text)\n\nINPUT_TEXT = string.ascii_lowercase # abcdefghijklmnopqrstuvwxyz\nOUTPUT_TEXT = INPUT_TEXT[2:]+INPUT_TEXT[:2] # cdefghijklmnopqrstuvwxyzab\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = \"\"\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \\\namknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \\\nrfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \\\nynnjw ml rfc spj.\"\"\"\n\n#print(CYPHER_TEXT.translate(TRANSLATION_TABLE))\n\n# The encrypted text told us to apply the same translation to the url\n#print('map'.translate(TRANSLATION_TABLE)) # solution here\n\n# Success, let's print out the next level url\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Job:
"""
Job class which stores the attributes of the jobs
"""
def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
self.day = day
self.startTime = startTime
self.endTime = endTime
self.noOfChildren = noOfChildren
self.hourlyRate = hourlyRate
self.value = (endTime - startTime) / 100 * hourlyRate
def __str__(self):
return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.
endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate
) + ' ' + str(self.value)
<|reserved_special_token_0|>
def takeInput():
"""
Takes input from the console and creates objects and stores in a list jobList
:return: jobList-list in which input is stored as objects
"""
n = int(input())
jobList = []
for i in range(n):
str = input().strip('\n').split(' ')
if int(str[1]) >= 600 and int(str[2]) <= 2300:
jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),
int(str[4]))
jobList.append(jobs)
return jobList
def sortInputByEndTimeAndDay(jobList):
"""
Sorts the jobList based on day and then the endTime
:param jobList: list of jobs
:return: jobList in a sorted manner with respect to day and endTime
"""
jobList = sorted(jobList, key=attrgetter('day', 'endTime'))
return jobList
def divideJobs(jobList, maximum):
"""
Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.
:param jobList: sorted jobLists
:param maximum: the maximum amongst the days being considered
:return: segregatedJobs which is a list of lists
"""
segregatedJobs = [[0]] * maximum
temp = jobList[0].day
j = 0
for i in range(0, len(jobList)):
if jobList[i].day == temp:
segregatedJobs[j].append(jobList[i])
else:
temp = jobList[i].day
j += 1
segregatedJobs[j] = [0, jobList[i]]
return segregatedJobs
def computeRho(segregatedJob):
"""
To compute the Roh value in a list
:param segregatedJob: jobs done in a particular day
:return: rho: list in which computed rho is stored
"""
rho = [0]
count = 0
for i in range(1, len(segregatedJob)):
j = i - 1
while j > 0:
if segregatedJob[i].startTime >= segregatedJob[j].endTime:
count += 1
rho.append(j)
break
j = j - 1
if count == 0:
rho.append(0)
count = 0
return rho
def algo(segregatedJob):
"""
Implementing the interval scheduling algorithm
:param segregatedJob: A sorted list of jobs of one particular day
:return: None
"""
global total
rho = computeRho(segregatedJob)
r = len(rho)
S = [[(0) for x in range(r)] for y in range(r)]
k = 0
while k < len(S):
for j in range(k, len(S)):
if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[
j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = S[j - 1][k]
elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S
[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j -
1][k])
else:
pass
S[k][j] = S[j][k]
k += 1
length = len(S)
total += S[length - 1][length - 1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Job:
"""
Job class which stores the attributes of the jobs
"""
def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
self.day = day
self.startTime = startTime
self.endTime = endTime
self.noOfChildren = noOfChildren
self.hourlyRate = hourlyRate
self.value = (endTime - startTime) / 100 * hourlyRate
def __str__(self):
return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.
endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate
) + ' ' + str(self.value)
<|reserved_special_token_0|>
def takeInput():
"""
Takes input from the console and creates objects and stores in a list jobList
:return: jobList-list in which input is stored as objects
"""
n = int(input())
jobList = []
for i in range(n):
str = input().strip('\n').split(' ')
if int(str[1]) >= 600 and int(str[2]) <= 2300:
jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),
int(str[4]))
jobList.append(jobs)
return jobList
def sortInputByEndTimeAndDay(jobList):
"""
Sorts the jobList based on day and then the endTime
:param jobList: list of jobs
:return: jobList in a sorted manner with respect to day and endTime
"""
jobList = sorted(jobList, key=attrgetter('day', 'endTime'))
return jobList
def divideJobs(jobList, maximum):
"""
Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.
:param jobList: sorted jobLists
:param maximum: the maximum amongst the days being considered
:return: segregatedJobs which is a list of lists
"""
segregatedJobs = [[0]] * maximum
temp = jobList[0].day
j = 0
for i in range(0, len(jobList)):
if jobList[i].day == temp:
segregatedJobs[j].append(jobList[i])
else:
temp = jobList[i].day
j += 1
segregatedJobs[j] = [0, jobList[i]]
return segregatedJobs
def computeRho(segregatedJob):
"""
To compute the Roh value in a list
:param segregatedJob: jobs done in a particular day
:return: rho: list in which computed rho is stored
"""
rho = [0]
count = 0
for i in range(1, len(segregatedJob)):
j = i - 1
while j > 0:
if segregatedJob[i].startTime >= segregatedJob[j].endTime:
count += 1
rho.append(j)
break
j = j - 1
if count == 0:
rho.append(0)
count = 0
return rho
def algo(segregatedJob):
"""
Implementing the interval scheduling algorithm
:param segregatedJob: A sorted list of jobs of one particular day
:return: None
"""
global total
rho = computeRho(segregatedJob)
r = len(rho)
S = [[(0) for x in range(r)] for y in range(r)]
k = 0
while k < len(S):
for j in range(k, len(S)):
if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[
j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = S[j - 1][k]
elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S
[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j -
1][k])
else:
pass
S[k][j] = S[j][k]
k += 1
length = len(S)
total += S[length - 1][length - 1]
def main():
"""
Main function.
return: None
"""
global total
jobList = takeInput()
jobListSorted = sortInputByEndTimeAndDay(jobList)
maximum = jobListSorted[len(jobListSorted) - 1].day
segregatedJobs = divideJobs(jobListSorted, maximum)
for i in range(len(segregatedJobs)):
algo(segregatedJobs[i])
print(int(total))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Job:
"""
Job class which stores the attributes of the jobs
"""
def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
self.day = day
self.startTime = startTime
self.endTime = endTime
self.noOfChildren = noOfChildren
self.hourlyRate = hourlyRate
self.value = (endTime - startTime) / 100 * hourlyRate
def __str__(self):
return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.
endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate
) + ' ' + str(self.value)
total = 0
def takeInput():
"""
Takes input from the console and creates objects and stores in a list jobList
:return: jobList-list in which input is stored as objects
"""
n = int(input())
jobList = []
for i in range(n):
str = input().strip('\n').split(' ')
if int(str[1]) >= 600 and int(str[2]) <= 2300:
jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),
int(str[4]))
jobList.append(jobs)
return jobList
def sortInputByEndTimeAndDay(jobList):
"""
Sorts the jobList based on day and then the endTime
:param jobList: list of jobs
:return: jobList in a sorted manner with respect to day and endTime
"""
jobList = sorted(jobList, key=attrgetter('day', 'endTime'))
return jobList
def divideJobs(jobList, maximum):
"""
Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.
:param jobList: sorted jobLists
:param maximum: the maximum amongst the days being considered
:return: segregatedJobs which is a list of lists
"""
segregatedJobs = [[0]] * maximum
temp = jobList[0].day
j = 0
for i in range(0, len(jobList)):
if jobList[i].day == temp:
segregatedJobs[j].append(jobList[i])
else:
temp = jobList[i].day
j += 1
segregatedJobs[j] = [0, jobList[i]]
return segregatedJobs
def computeRho(segregatedJob):
"""
    To compute the rho value in a list
:param segregatedJob: jobs done in a particular day
:return: rho: list in which computed rho is stored
"""
rho = [0]
count = 0
for i in range(1, len(segregatedJob)):
j = i - 1
while j > 0:
if segregatedJob[i].startTime >= segregatedJob[j].endTime:
count += 1
rho.append(j)
break
j = j - 1
if count == 0:
rho.append(0)
count = 0
return rho
def algo(segregatedJob):
"""
Implementing the interval scheduling algorithm
:param segregatedJob: A sorted list of jobs of one particular day
:return: None
"""
global total
rho = computeRho(segregatedJob)
r = len(rho)
S = [[(0) for x in range(r)] for y in range(r)]
k = 0
while k < len(S):
for j in range(k, len(S)):
if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[
j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = S[j - 1][k]
elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S
[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j -
1][k])
else:
pass
S[k][j] = S[j][k]
k += 1
length = len(S)
total += S[length - 1][length - 1]
def main():
"""
Main function.
return: None
"""
global total
jobList = takeInput()
jobListSorted = sortInputByEndTimeAndDay(jobList)
maximum = jobListSorted[len(jobListSorted) - 1].day
segregatedJobs = divideJobs(jobListSorted, maximum)
for i in range(len(segregatedJobs)):
algo(segregatedJobs[i])
print(int(total))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from operator import *
class Job:
"""
Job class which stores the attributes of the jobs
"""
def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
self.day = day
self.startTime = startTime
self.endTime = endTime
self.noOfChildren = noOfChildren
self.hourlyRate = hourlyRate
self.value = (endTime - startTime) / 100 * hourlyRate
def __str__(self):
return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.
endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate
) + ' ' + str(self.value)
total = 0
def takeInput():
"""
Takes input from the console and creates objects and stores in a list jobList
:return: jobList-list in which input is stored as objects
"""
n = int(input())
jobList = []
for i in range(n):
str = input().strip('\n').split(' ')
if int(str[1]) >= 600 and int(str[2]) <= 2300:
jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),
int(str[4]))
jobList.append(jobs)
return jobList
def sortInputByEndTimeAndDay(jobList):
"""
Sorts the jobList based on day and then the endTime
:param jobList: list of jobs
:return: jobList in a sorted manner with respect to day and endTime
"""
jobList = sorted(jobList, key=attrgetter('day', 'endTime'))
return jobList
def divideJobs(jobList, maximum):
"""
Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.
:param jobList: sorted jobLists
:param maximum: the maximum amongst the days being considered
:return: segregatedJobs which is a list of lists
"""
segregatedJobs = [[0]] * maximum
temp = jobList[0].day
j = 0
for i in range(0, len(jobList)):
if jobList[i].day == temp:
segregatedJobs[j].append(jobList[i])
else:
temp = jobList[i].day
j += 1
segregatedJobs[j] = [0, jobList[i]]
return segregatedJobs
def computeRho(segregatedJob):
"""
    To compute the rho value in a list
:param segregatedJob: jobs done in a particular day
:return: rho: list in which computed rho is stored
"""
rho = [0]
count = 0
for i in range(1, len(segregatedJob)):
j = i - 1
while j > 0:
if segregatedJob[i].startTime >= segregatedJob[j].endTime:
count += 1
rho.append(j)
break
j = j - 1
if count == 0:
rho.append(0)
count = 0
return rho
def algo(segregatedJob):
"""
Implementing the interval scheduling algorithm
:param segregatedJob: A sorted list of jobs of one particular day
:return: None
"""
global total
rho = computeRho(segregatedJob)
r = len(rho)
S = [[(0) for x in range(r)] for y in range(r)]
k = 0
while k < len(S):
for j in range(k, len(S)):
if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[
j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = S[j - 1][k]
elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S
[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j -
1][k])
else:
pass
S[k][j] = S[j][k]
k += 1
length = len(S)
total += S[length - 1][length - 1]
def main():
"""
Main function.
return: None
"""
global total
jobList = takeInput()
jobListSorted = sortInputByEndTimeAndDay(jobList)
maximum = jobListSorted[len(jobListSorted) - 1].day
segregatedJobs = divideJobs(jobListSorted, maximum)
for i in range(len(segregatedJobs)):
algo(segregatedJobs[i])
print(int(total))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""
file: babysit.py
language: python3
author: [email protected] Parvathi Nair
author: vpb8262 Vishal Bulchandani
"""
"""
To compute the maximum pay a brother and sister can earn considering jobs that they can work on
together or separately depending on the number of children to babysit
"""
from operator import *
class Job:
"""
Job class which stores the attributes of the jobs
"""
def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
        self.day = day
        self.startTime = startTime
        self.endTime = endTime
        self.noOfChildren = noOfChildren
        self.hourlyRate = hourlyRate
        self.value = (endTime - startTime) / 100 * hourlyRate
    def __str__(self):
        return str(self.day) + " " + str(self.startTime) + " " + str(self.endTime) + " " + str(self.noOfChildren) + " " + str(self.hourlyRate) + " " + str(self.value)
# total is a global variable
total = 0
def takeInput():
"""
    Takes input from the console, creates Job objects, and stores them in the list jobList
    :return: jobList - list in which the input is stored as objects
"""
    n = int(input())
    jobList = []
    # taking n inputs and creating objects
    for i in range(n):
        str = input().strip('\n').split(" ")
        if int(str[1]) >= 600 and int(str[2]) <= 2300:
            jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]), int(str[4]))
jobList.append(jobs)
return jobList
def sortInputByEndTimeAndDay(jobList):
"""
Sorts the jobList based on day and then the endTime
:param jobList: list of jobs
:return: jobList in a sorted manner with respect to day and endTime
"""
    jobList = sorted(jobList, key=attrgetter('day', 'endTime'))
return jobList
def divideJobs(jobList, maximum):
"""
    Segregates the jobs into a list of lists with respect to day, that is, jobs done on a particular day are stored at a single index.
:param jobList: sorted jobLists
:param maximum: the maximum amongst the days being considered
:return: segregatedJobs which is a list of lists
"""
    # one fresh list per day; [[0]] * maximum would alias a single inner list
    segregatedJobs = [[0] for _ in range(maximum)]
    temp = jobList[0].day
    j = 0
    for i in range(0, len(jobList)):
        if jobList[i].day == temp:
            segregatedJobs[j].append(jobList[i])
        else:
            temp = jobList[i].day
            j += 1
            segregatedJobs[j] = [0, jobList[i]]
return segregatedJobs
def computeRho(segregatedJob):
"""
    To compute the rho value in a list
:param segregatedJob: jobs done in a particular day
:return: rho: list in which computed rho is stored
"""
    # inserting 0 at the first position
    rho = [0]
    count = 0
    # calculating rho
    for i in range(1, len(segregatedJob)):
        j = i - 1
        while j > 0:
            if segregatedJob[i].startTime >= segregatedJob[j].endTime:
                count += 1
                rho.append(j)
                break
            j = j - 1
if count == 0:
rho.append(0)
count = 0
return rho
def algo(segregatedJob):
"""
Implementing the interval scheduling algorithm
:param segregatedJob: A sorted list of jobs of one particular day
:return: None
"""
global total
rho = computeRho(segregatedJob)
    r = len(rho)
S = [[0 for x in range(r)] for y in range(r)]
k = 0
    # implementation of the scheduling algorithm
    while k < len(S):
for j in range(k, len(S)):
if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = S[j - 1][k]
elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])
else:
pass
S[k][j] = S[j][k]
k += 1
length = len(S)
#Adding the max pay for every individual field in the matrix
total += S[length-1][length-1]
def main():
"""
    Main function.
    :return: None
    """
    global total
    jobList = takeInput()
    jobListSorted = sortInputByEndTimeAndDay(jobList)
    maximum = jobListSorted[len(jobListSorted) - 1].day
    segregatedJobs = divideJobs(jobListSorted, maximum)
    for i in range(len(segregatedJobs)):
algo(segregatedJobs[i])
# print the total pay
print(int(total))
if __name__ == '__main__':
main()
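To make computeRho() concrete: for jobs sorted by end time, rho[i] is the latest earlier job whose end time is no later than job i's start time (0 when no such job exists), which is exactly the predecessor function p(i) of weighted interval scheduling. A self-contained sketch with hypothetical one-day jobs, not part of the original input format:

# jobs sorted by end time as (startTime, endTime); index 0 is a dummy slot,
# mirroring the leading 0 that divideJobs() places in each day's list
jobs = [None, (600, 800), (700, 900), (800, 1000), (930, 1100)]
rho = [0]
for i in range(1, len(jobs)):
    p = 0
    for j in range(i - 1, 0, -1):        # scan earlier jobs, latest first
        if jobs[i][0] >= jobs[j][1]:     # job j ends before job i starts
            p = j
            break
    rho.append(p)
print(rho)  # [0, 0, 0, 1, 2]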
|
flexible
|
{
"blob_id": "f57fa2787934dc2a002f82aa1af1f1d9a7f90da5",
"index": 9947,
"step-1": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\n<mask token>\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\n<mask token>\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) 
- 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\ntotal = 0\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) - 
1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom operator import *\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\ntotal = 0\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = 
jobListSorted[len(jobListSorted) - 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nfile: babysit.py\nlanguage: python3\nauthor: [email protected] Parvathi Nair\nauthor: vpb8262 Vishal Bulchandani\n\n\"\"\"\n\"\"\"\nTo compute the maximum pay a brother and sister can earn considering jobs that they can work on\ntogether or separately depending on the number of children to babysit\n\n\"\"\"\nfrom operator import *\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day=day\n self.startTime=startTime\n self.endTime=endTime\n self.noOfChildren=noOfChildren\n self.hourlyRate=hourlyRate\n self.value=(endTime-startTime)/100*hourlyRate\n\n def __str__(self):\n return str(self.day)+ \" \" + str(self.startTime) + \" \"+ str(self.endTime) + \" \" +str(self.noOfChildren) + \" \" + str(self.hourlyRate)+ \" \" + str(self.value)\n\n#total is global variable\ntotal = 0\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n=int(input())\n jobList=[]\n\n #taking n inputs and creating objects\n for i in range (n):\n str = input().strip('\\n').split(\" \")\n if int(str[1])>=600 and int(str[2])<=2300:\n jobs=Job (int(str[0]),int(str[1]),int(str[2]),int(str[3]),int(str[4]))\n jobList.append(jobs)\n return jobList\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList=sorted(jobList, key= attrgetter('day','endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n\n segregatedJobs=[[0]]*(maximum)\n\n temp=jobList[0].day\n j = 0\n for i in range(0,len(jobList)):\n if jobList[i].day==temp:\n segregatedJobs[j].append(jobList[i])\n\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j]=[0,jobList[i]]\n\n return segregatedJobs\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n\n #inserting 0 at the 1st position\n rho = [0]\n count = 0\n\n #calculating rho\n for i in range(1,len(segregatedJob)):\n j = i-1\n while(j>0):\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j=j-1\n if count == 0:\n rho.append(0)\n count = 0\n\n\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho);\n\n S = [[0 for x in range(r)] for y in range(r)]\n k = 0\n #implementaion of scheduling algorithm\n while(k<len(S)):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 
1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n\n #Adding the max pay for every individual field in the matrix\n total += S[length-1][length-1]\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList=takeInput()\n jobListSorted=sortInputByEndTimeAndDay(jobList)\n maximum=jobListSorted[len(jobListSorted)-1].day\n segregatedJobs=divideJobs(jobListSorted, maximum)\n for i in range (len(segregatedJobs)):\n algo(segregatedJobs[i])\n\n # print the total pay\n print(int(total))\n\nif __name__ == '__main__':\n main()",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(windowController, *args, **kwargs):
textView = windowController.textView()
document = windowController.document()
    if textView is not None:
dateFormat = time.strftime('%Y.%m.%d')
textView.insertText_(dateFormat)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VPScriptSuperMenuTitle = 'GTD'
VPScriptMenuTitle = 'Insert Date'
VPShortcutMask = 'control'
VPShortcutKey = 'J'
<|reserved_special_token_0|>
def main(windowController, *args, **kwargs):
textView = windowController.textView()
document = windowController.document()
    if textView is not None:
dateFormat = time.strftime('%Y.%m.%d')
textView.insertText_(dateFormat)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VPScriptSuperMenuTitle = 'GTD'
VPScriptMenuTitle = 'Insert Date'
VPShortcutMask = 'control'
VPShortcutKey = 'J'
import AppKit
import time
def main(windowController, *args, **kwargs):
textView = windowController.textView()
document = windowController.document()
    if textView is not None:
dateFormat = time.strftime('%Y.%m.%d')
textView.insertText_(dateFormat)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
'''
:Title
Insert Date
:Planguage
Python
:Requires
VoodooPad 3.5+
:Description
Inserts Date
EOD
'''
VPScriptSuperMenuTitle = "GTD"
VPScriptMenuTitle = "Insert Date"
VPShortcutMask = "control"
VPShortcutKey = "J"
import AppKit
import time
def main(windowController, *args, **kwargs):
textView = windowController.textView()
document = windowController.document()
    if textView is not None:
dateFormat = time.strftime("%Y.%m.%d")
textView.insertText_(dateFormat)
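The inserted text is entirely determined by the strftime pattern, so the plugin is easy to retarget. A hedged sketch of alternative patterns (standard time-module format codes; only "%Y.%m.%d" appears in the original script):

import time
print(time.strftime("%Y.%m.%d"))         # e.g. 2024.01.15, the format used above
print(time.strftime("%Y-%m-%d %H:%M"))   # e.g. 2024-01-15 09:30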
|
flexible
|
{
"blob_id": "e51ca78ca6751f8238a39d3eae55d6cc6ab65128",
"index": 5797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(windowController, *args, **kwargs):\n textView = windowController.textView()\n document = windowController.document()\n if textView != None:\n dateFormat = time.strftime('%Y.%m.%d')\n textView.insertText_(dateFormat)\n",
"step-3": "<mask token>\nVPScriptSuperMenuTitle = 'GTD'\nVPScriptMenuTitle = 'Insert Date'\nVPShortcutMask = 'control'\nVPShortcutKey = 'J'\n<mask token>\n\n\ndef main(windowController, *args, **kwargs):\n textView = windowController.textView()\n document = windowController.document()\n if textView != None:\n dateFormat = time.strftime('%Y.%m.%d')\n textView.insertText_(dateFormat)\n",
"step-4": "<mask token>\nVPScriptSuperMenuTitle = 'GTD'\nVPScriptMenuTitle = 'Insert Date'\nVPShortcutMask = 'control'\nVPShortcutKey = 'J'\nimport AppKit\nimport time\n\n\ndef main(windowController, *args, **kwargs):\n textView = windowController.textView()\n document = windowController.document()\n if textView != None:\n dateFormat = time.strftime('%Y.%m.%d')\n textView.insertText_(dateFormat)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n'''\n:Title\nInsert Date\n\n:Planguage\nPython\n\n:Requires\nVoodooPad 3.5+\n\n:Description\nInserts Date\n\nEOD\n'''\nVPScriptSuperMenuTitle = \"GTD\"\nVPScriptMenuTitle = \"Insert Date\"\nVPShortcutMask = \"control\"\nVPShortcutKey = \"J\"\n\nimport AppKit\nimport time\n\ndef main(windowController, *args, **kwargs):\n textView = windowController.textView()\n document = windowController.document()\n\n if textView != None:\n dateFormat = time.strftime(\"%Y.%m.%d\")\n textView.insertText_(dateFormat)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def downgrade():
op.drop_index('ix_ballot_measure_tags_ballot_measure_id',
'ballot_measure_tags')
op.drop_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags')
op.drop_table(u'ballot_measure_tags')
op.drop_index('ix_stance_committee_id', 'stance')
op.drop_index('ix_stance_ballot_measure_id', 'stance')
op.drop_table(u'stance')
op.drop_index('ix_contract_committee_id', 'contract')
op.drop_index('ix_contract_service_id', 'contract')
op.drop_index('ix_contract_consultant_id', 'contract')
op.drop_table(u'contract')
op.drop_index('ix_donation_donor_id', 'donation')
op.drop_index('ix_donation_committee_id', 'donation')
op.drop_table(u'donation')
op.drop_index('ix_ballot_measure_ballot_type_id', 'ballot_measure')
op.drop_index('ix_ballot_measure_election_id', 'ballot_measure')
op.drop_table(u'ballot_measure')
op.drop_index('ix_committee_election_id', 'committee')
op.drop_table(u'committee')
op.drop_index('ix_donor_employer_id', 'donor')
op.drop_table(u'donor')
op.drop_table(u'election')
op.drop_table(u'tag')
op.drop_table(u'employer')
op.drop_table(u'ballot_type')
op.drop_table(u'service')
op.drop_table(u'consultant')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upgrade():
op.create_table(u'consultant', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=False), sa.Column('address', sa.Text(), nullable=
True), sa.PrimaryKeyConstraint('id'))
op.create_table(u'service', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=False), sa.Column('description', sa.Text(),
nullable=True), sa.PrimaryKeyConstraint('id'))
op.create_table(u'ballot_type', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=False), sa.Column('percent_required', sa.Numeric(
precision=2, scale=2), nullable=False), sa.PrimaryKeyConstraint('id'))
op.create_table(u'employer', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=False), sa.PrimaryKeyConstraint('id'))
op.create_table(u'tag', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=False), sa.PrimaryKeyConstraint('id'))
op.create_table(u'election', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('date', sa.
Date(), nullable=False), sa.PrimaryKeyConstraint('id'))
op.create_table(u'donor', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('first_name',
sa.Text(), nullable=False), sa.Column('last_name', sa.Text(),
nullable=False), sa.Column('address', sa.Text(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=False), sa.Column(
'longitude', sa.Float(), nullable=False), sa.Column('employer_id',
postgresql.UUID(), nullable=True), sa.ForeignKeyConstraint([
'employer_id'], [u'employer.id']), sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('first_name', 'last_name', 'latitude', 'longitude')
)
op.create_index('ix_donor_employer_id', 'donor', ['employer_id'],
unique=False)
op.create_table(u'committee', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=False), sa.Column('filer_id', sa.Text(), nullable=
True), sa.Column('sponsor', sa.Text(), nullable=True), sa.Column(
'election_id', postgresql.UUID(), nullable=True), sa.
ForeignKeyConstraint(['election_id'], [u'election.id']), sa.
PrimaryKeyConstraint('id'))
op.create_index('ix_committee_election_id', 'committee', ['election_id'
], unique=False)
op.create_table(u'ballot_measure', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('name', sa.
Text(), nullable=True), sa.Column('prop_id', sa.Text(), nullable=
False), sa.Column('description', sa.Text(), nullable=True), sa.
Column('num_yes', sa.Integer(), nullable=True), sa.Column('num_no',
sa.Integer(), nullable=True), sa.Column('passed', sa.Boolean(),
nullable=True), sa.Column('ballot_type_id', postgresql.UUID(),
nullable=True), sa.Column('election_id', postgresql.UUID(),
nullable=True), sa.ForeignKeyConstraint(['ballot_type_id'], [
u'ballot_type.id']), sa.ForeignKeyConstraint(['election_id'], [
u'election.id']), sa.PrimaryKeyConstraint('id'))
op.create_index('ix_ballot_measure_election_id', 'ballot_measure', [
'election_id'], unique=False)
op.create_index('ix_ballot_measure_ballot_type_id', 'ballot_measure', [
'ballot_type_id'], unique=False)
op.create_table(u'donation', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('amount', sa.
Float(), nullable=False), sa.Column('transaction_date', sa.Date(),
nullable=False), sa.Column('donor_id', postgresql.UUID(), nullable=
False), sa.Column('committee_id', postgresql.UUID(), nullable=False
), sa.ForeignKeyConstraint(['committee_id'], [u'committee.id']), sa
.ForeignKeyConstraint(['donor_id'], [u'donor.id']), sa.
PrimaryKeyConstraint('id'))
op.create_index('ix_donation_committee_id', 'donation', ['committee_id'
], unique=False)
op.create_index('ix_donation_donor_id', 'donation', ['donor_id'],
unique=False)
op.create_table(u'contract', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('payment', sa
.Float(), nullable=False), sa.Column('consultant_id', postgresql.
UUID(), nullable=False), sa.Column('service_id', postgresql.UUID(),
nullable=True), sa.Column('description', sa.Text(), nullable=True),
sa.Column('committee_id', postgresql.UUID(), nullable=False), sa.
ForeignKeyConstraint(['committee_id'], [u'committee.id']), sa.
ForeignKeyConstraint(['consultant_id'], [u'consultant.id']), sa.
ForeignKeyConstraint(['service_id'], [u'service.id']), sa.
PrimaryKeyConstraint('id'))
op.create_index('ix_contract_consultant_id', 'contract', [
'consultant_id'], unique=False)
op.create_index('ix_contract_service_id', 'contract', ['service_id'],
unique=False)
op.create_index('ix_contract_committee_id', 'contract', ['committee_id'
], unique=False)
op.create_table(u'stance', sa.Column('id', postgresql.UUID(),
server_default=func.uuid_generate_v4(), nullable=False), sa.Column(
'created', sa.DateTime(timezone=True), server_default=func.now(),
nullable=False), sa.Column('updated', sa.DateTime(timezone=True),
server_default=func.now(), nullable=False), sa.Column('voted_yes',
sa.Boolean(), nullable=False), sa.Column('committee_id', postgresql
.UUID(), nullable=False), sa.Column('ballot_measure_id', postgresql
.UUID(), nullable=False), sa.ForeignKeyConstraint([
'ballot_measure_id'], [u'ballot_measure.id']), sa.
ForeignKeyConstraint(['committee_id'], [u'committee.id']), sa.
PrimaryKeyConstraint('id'), sa.UniqueConstraint('committee_id',
'ballot_measure_id'))
op.create_index('ix_stance_ballot_measure_id', 'stance', [
'ballot_measure_id'], unique=False)
op.create_index('ix_stance_committee_id', 'stance', ['committee_id'],
unique=False)
op.create_table(u'ballot_measure_tags', sa.Column('id', postgresql.UUID
(), server_default=func.uuid_generate_v4(), nullable=False), sa.
Column('created', sa.DateTime(timezone=True), server_default=func.
now(), nullable=False), sa.Column('updated', sa.DateTime(timezone=
True), server_default=func.now(), nullable=False), sa.Column(
'ballot_measure_id', postgresql.UUID(), nullable=False), sa.Column(
'tag_id', postgresql.UUID(), nullable=False), sa.
ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id']),
sa.ForeignKeyConstraint(['tag_id'], [u'tag.id']), sa.
PrimaryKeyConstraint('id'), sa.UniqueConstraint('ballot_measure_id',
'tag_id'))
op.create_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags',
['tag_id'], unique=False)
op.create_index('ix_ballot_measure_tags_ballot_measure_id',
'ballot_measure_tags', ['ballot_measure_id'], unique=False)
def downgrade():
op.drop_index('ix_ballot_measure_tags_ballot_measure_id',
'ballot_measure_tags')
op.drop_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags')
op.drop_table(u'ballot_measure_tags')
op.drop_index('ix_stance_committee_id', 'stance')
op.drop_index('ix_stance_ballot_measure_id', 'stance')
op.drop_table(u'stance')
op.drop_index('ix_contract_committee_id', 'contract')
op.drop_index('ix_contract_service_id', 'contract')
op.drop_index('ix_contract_consultant_id', 'contract')
op.drop_table(u'contract')
op.drop_index('ix_donation_donor_id', 'donation')
op.drop_index('ix_donation_committee_id', 'donation')
op.drop_table(u'donation')
op.drop_index('ix_ballot_measure_ballot_type_id', 'ballot_measure')
op.drop_index('ix_ballot_measure_election_id', 'ballot_measure')
op.drop_table(u'ballot_measure')
op.drop_index('ix_committee_election_id', 'committee')
op.drop_table(u'committee')
op.drop_index('ix_donor_employer_id', 'donor')
op.drop_table(u'donor')
op.drop_table(u'election')
op.drop_table(u'tag')
op.drop_table(u'employer')
op.drop_table(u'ballot_type')
op.drop_table(u'service')
op.drop_table(u'consultant')
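Note that downgrade() undoes upgrade() in reverse dependency order: the indexes and the child tables holding foreign keys (ballot_measure_tags, stance, contract, donation) are dropped before the tables they reference, so no foreign-key constraint is violated mid-teardown. A minimal sketch of that principle (hypothetical helper, not part of this migration):

# dropping in reverse creation order keeps referencing tables ahead of
# the tables they point at
def drop_all(tables_in_create_order):
    for name in reversed(tables_in_create_order):
        op.drop_table(name)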
"""Initial migration
Revision ID: 1f2296edbc75
Revises: 7417382a3f1
Create Date: 2014-01-19 23:04:58.877817
"""
# revision identifiers, used by Alembic.
revision = '1f2296edbc75'
down_revision = '7417382a3f1'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy import func
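
# func.uuid_generate_v4() is provided by the PostgreSQL "uuid-ossp" extension.
# It is assumed to be enabled already (e.g. in the down_revision 7417382a3f1)
# with something like:
#
#     op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp"')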
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table(u'consultant',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('address', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'service',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'ballot_type',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('percent_required', sa.Numeric(precision=2, scale=2), nullable=False),
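    # NUMERIC(precision=2, scale=2) only holds fractions strictly between -1
    # and 1 (e.g. 0.67 for a two-thirds threshold); a flat 1.00 would overflow.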
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'employer',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'tag',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'election',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('date', sa.Date(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'donor',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('first_name', sa.Text(), nullable=False),
sa.Column('last_name', sa.Text(), nullable=False),
sa.Column('address', sa.Text(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=False),
sa.Column('longitude', sa.Float(), nullable=False),
sa.Column('employer_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['employer_id'], [u'employer.id'], ),
sa.PrimaryKeyConstraint('id'),
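    # The constraint below treats name plus geocoded location as a natural
    # key, presumably to de-duplicate donors whose address strings vary.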
sa.UniqueConstraint('first_name','last_name','latitude','longitude')
)
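    # PostgreSQL does not automatically index foreign-key columns, so each
    # *_id column used for lookups gets an explicit non-unique index.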
op.create_index('ix_donor_employer_id', 'donor', ['employer_id'], unique=False)
op.create_table(u'committee',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('filer_id', sa.Text(), nullable=True),
sa.Column('sponsor', sa.Text(), nullable=True),
sa.Column('election_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['election_id'], [u'election.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_committee_election_id', 'committee', ['election_id'], unique=False)
op.create_table(u'ballot_measure',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('prop_id', sa.Text(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('num_yes', sa.Integer(), nullable=True),
sa.Column('num_no', sa.Integer(), nullable=True),
sa.Column('passed', sa.Boolean(), nullable=True),
sa.Column('ballot_type_id', postgresql.UUID(), nullable=True),
sa.Column('election_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['ballot_type_id'], [u'ballot_type.id'], ),
sa.ForeignKeyConstraint(['election_id'], [u'election.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_ballot_measure_election_id', 'ballot_measure', ['election_id'], unique=False)
op.create_index('ix_ballot_measure_ballot_type_id', 'ballot_measure', ['ballot_type_id'], unique=False)
op.create_table(u'donation',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('amount', sa.Float(), nullable=False),
sa.Column('transaction_date', sa.Date(), nullable=False),
sa.Column('donor_id', postgresql.UUID(), nullable=False),
sa.Column('committee_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),
sa.ForeignKeyConstraint(['donor_id'], [u'donor.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_donation_committee_id', 'donation', ['committee_id'], unique=False)
op.create_index('ix_donation_donor_id', 'donation', ['donor_id'], unique=False)
op.create_table(u'contract',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('payment', sa.Float(), nullable=False),
sa.Column('consultant_id', postgresql.UUID(), nullable=False),
sa.Column('service_id', postgresql.UUID(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('committee_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),
sa.ForeignKeyConstraint(['consultant_id'], [u'consultant.id'], ),
sa.ForeignKeyConstraint(['service_id'], [u'service.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_contract_consultant_id', 'contract', ['consultant_id'], unique=False)
op.create_index('ix_contract_service_id', 'contract', ['service_id'], unique=False)
op.create_index('ix_contract_committee_id', 'contract', ['committee_id'], unique=False)
op.create_table(u'stance',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('voted_yes', sa.Boolean(), nullable=False),
sa.Column('committee_id', postgresql.UUID(), nullable=False),
sa.Column('ballot_measure_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id'], ),
sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('committee_id','ballot_measure_id')
)
op.create_index('ix_stance_ballot_measure_id', 'stance', ['ballot_measure_id'], unique=False)
op.create_index('ix_stance_committee_id', 'stance', ['committee_id'], unique=False)
op.create_table(u'ballot_measure_tags',
sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),
sa.Column('ballot_measure_id', postgresql.UUID(), nullable=False),
sa.Column('tag_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id'], ),
sa.ForeignKeyConstraint(['tag_id'], [u'tag.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('ballot_measure_id','tag_id')
)
op.create_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags', ['tag_id'], unique=False)
op.create_index('ix_ballot_measure_tags_ballot_measure_id', 'ballot_measure_tags', ['ballot_measure_id'], unique=False)
### end Alembic commands ###
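
# A minimal usage sketch (assuming an alembic.ini configured for the target
# database; the exact invocation depends on the project layout):
#
#     alembic upgrade 1f2296edbc75    # apply this migration
#     alembic downgrade 7417382a3f1   # revert to the previous revision
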
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_ballot_measure_tags_ballot_measure_id', 'ballot_measure_tags')
op.drop_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags')
op.drop_table(u'ballot_measure_tags')
op.drop_index('ix_stance_committee_id', 'stance')
op.drop_index('ix_stance_ballot_measure_id', 'stance')
op.drop_table(u'stance')
op.drop_index('ix_contract_committee_id', 'contract')
op.drop_index('ix_contract_service_id', 'contract')
op.drop_index('ix_contract_consultant_id', 'contract')
op.drop_table(u'contract')
op.drop_index('ix_donation_donor_id', 'donation')
op.drop_index('ix_donation_committee_id', 'donation')
op.drop_table(u'donation')
op.drop_index('ix_ballot_measure_ballot_type_id', 'ballot_measure')
op.drop_index('ix_ballot_measure_election_id', 'ballot_measure')
op.drop_table(u'ballot_measure')
op.drop_index('ix_committee_election_id', 'committee')
op.drop_table(u'committee')
op.drop_index('ix_donor_employer_id', 'donor')
op.drop_table(u'donor')
op.drop_table(u'election')
op.drop_table(u'tag')
op.drop_table(u'employer')
op.drop_table(u'ballot_type')
op.drop_table(u'service')
op.drop_table(u'consultant')
### end Alembic commands ###
sa.Column('longitude', sa.Float(), nullable=False),\n sa.Column('employer_id', postgresql.UUID(), nullable=True),\n sa.ForeignKeyConstraint(['employer_id'], [u'employer.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('first_name','last_name','latitude','longitude')\n )\n op.create_index('ix_donor_employer_id', 'donor', ['employer_id'], unique=False)\n op.create_table(u'committee',\n sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),\n sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('name', sa.Text(), nullable=False),\n sa.Column('filer_id', sa.Text(), nullable=True),\n sa.Column('sponsor', sa.Text(), nullable=True),\n sa.Column('election_id', postgresql.UUID(), nullable=True),\n sa.ForeignKeyConstraint(['election_id'], [u'election.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_committee_election_id', 'committee', ['election_id'], unique=False)\n op.create_table(u'ballot_measure',\n sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),\n sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('name', sa.Text(), nullable=True),\n sa.Column('prop_id', sa.Text(), nullable=False),\n sa.Column('description', sa.Text(), nullable=True),\n sa.Column('num_yes', sa.Integer(), nullable=True),\n sa.Column('num_no', sa.Integer(), nullable=True),\n sa.Column('passed', sa.Boolean(), nullable=True),\n sa.Column('ballot_type_id', postgresql.UUID(), nullable=True),\n sa.Column('election_id', postgresql.UUID(), nullable=True),\n sa.ForeignKeyConstraint(['ballot_type_id'], [u'ballot_type.id'], ),\n sa.ForeignKeyConstraint(['election_id'], [u'election.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_ballot_measure_election_id', 'ballot_measure', ['election_id'], unique=False)\n op.create_index('ix_ballot_measure_ballot_type_id', 'ballot_measure', ['ballot_type_id'], unique=False)\n op.create_table(u'donation',\n sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),\n sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('amount', sa.Float(), nullable=False),\n sa.Column('transaction_date', sa.Date(), nullable=False),\n sa.Column('donor_id', postgresql.UUID(), nullable=False),\n sa.Column('committee_id', postgresql.UUID(), nullable=False),\n sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),\n sa.ForeignKeyConstraint(['donor_id'], [u'donor.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_donation_committee_id', 'donation', ['committee_id'], unique=False)\n op.create_index('ix_donation_donor_id', 'donation', ['donor_id'], unique=False)\n op.create_table(u'contract',\n sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),\n sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('payment', sa.Float(), nullable=False),\n sa.Column('consultant_id', postgresql.UUID(), nullable=False),\n 
sa.Column('service_id', postgresql.UUID(), nullable=True),\n sa.Column('description', sa.Text(), nullable=True),\n sa.Column('committee_id', postgresql.UUID(), nullable=False),\n sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),\n sa.ForeignKeyConstraint(['consultant_id'], [u'consultant.id'], ),\n sa.ForeignKeyConstraint(['service_id'], [u'service.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_contract_consultant_id', 'contract', ['consultant_id'], unique=False)\n op.create_index('ix_contract_service_id', 'contract', ['service_id'], unique=False)\n op.create_index('ix_contract_committee_id', 'contract', ['committee_id'], unique=False)\n op.create_table(u'stance',\n sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),\n sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('voted_yes', sa.Boolean(), nullable=False),\n sa.Column('committee_id', postgresql.UUID(), nullable=False),\n sa.Column('ballot_measure_id', postgresql.UUID(), nullable=False),\n sa.ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id'], ),\n sa.ForeignKeyConstraint(['committee_id'], [u'committee.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('committee_id','ballot_measure_id')\n )\n op.create_index('ix_stance_ballot_measure_id', 'stance', ['ballot_measure_id'], unique=False)\n op.create_index('ix_stance_committee_id', 'stance', ['committee_id'], unique=False)\n op.create_table(u'ballot_measure_tags',\n sa.Column('id', postgresql.UUID(), server_default=func.uuid_generate_v4(), nullable=False),\n sa.Column('created', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('ballot_measure_id', postgresql.UUID(), nullable=False),\n sa.Column('tag_id', postgresql.UUID(), nullable=False),\n sa.ForeignKeyConstraint(['ballot_measure_id'], [u'ballot_measure.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], [u'tag.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('ballot_measure_id','tag_id')\n )\n op.create_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags', ['tag_id'], unique=False)\n op.create_index('ix_ballot_measure_tags_ballot_measure_id', 'ballot_measure_tags', ['ballot_measure_id'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index('ix_ballot_measure_tags_ballot_measure_id', 'ballot_measure_tags')\n op.drop_index('ix_ballot_measure_tags_tag_id', 'ballot_measure_tags')\n op.drop_table(u'ballot_measure_tags')\n op.drop_index('ix_stance_committee_id', 'stance')\n op.drop_index('ix_stance_ballot_measure_id', 'stance')\n op.drop_table(u'stance')\n op.drop_index('ix_contract_committee_id', 'contract')\n op.drop_index('ix_contract_service_id', 'contract')\n op.drop_index('ix_contract_consultant_id', 'contract')\n op.drop_table(u'contract')\n op.drop_index('ix_donation_donor_id', 'donation')\n op.drop_index('ix_donation_committee_id', 'donation')\n op.drop_table(u'donation')\n op.drop_index('ix_ballot_measure_ballot_type_id', 'ballot_measure')\n op.drop_index('ix_ballot_measure_election_id', 'ballot_measure')\n op.drop_table(u'ballot_measure')\n op.drop_index('ix_committee_election_id', 'committee')\n op.drop_table(u'committee')\n op.drop_index('ix_donor_employer_id', 'donor')\n op.drop_table(u'donor')\n op.drop_table(u'election')\n op.drop_table(u'tag')\n op.drop_table(u'employer')\n op.drop_table(u'ballot_type')\n op.drop_table(u'service')\n op.drop_table(u'consultant')\n ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
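The record above is an auto-generated Alembic revision. A minimal sketch of the same upgrade/downgrade pattern, reduced to one hypothetical 'note' table (revision ids here are placeholders, not taken from the record), shows the shape without the bulk:

from alembic import op
import sqlalchemy as sa

# revision identifiers (placeholders for illustration only)
revision = 'abc123'
down_revision = None


def upgrade():
    op.create_table(
        'note',
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('body', sa.Text(), nullable=False),
    )
    op.create_index('ix_note_body', 'note', ['body'], unique=False)


def downgrade():
    # drop objects in reverse order of creation, as the record's downgrade() does
    op.drop_index('ix_note_body', 'note')
    op.drop_table('note')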
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mpl.style.use('classic')
<|reserved_special_token_0|>
ax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=
'$\\chi^B_2$')
ax1.axis([0, 300, -0.05, 0.2])
ax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')
ax1.set_ylabel('$\\chi_2$', fontsize=15, color='black')
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
label.set_fontsize(10)
<|reserved_special_token_0|>
ax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=
'$\\chi^B_4$')
ax2.axis([0, 300, -0.15, 0.2])
ax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')
ax2.set_ylabel('$\\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=
3.0, borderpad=0.5, borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
label.set_fontsize(10)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
wspace=0.2)
fig.savefig('chi.pdf')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mpl.style.use('classic')
chi2 = np.loadtxt('Lam0/buffer/chi2.dat')
chi4 = np.loadtxt('Lam0/buffer/chi4.dat')
fig = plt.figure(figsize=(9, 3.5))
ax1 = fig.add_subplot(121)
ax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=
'$\\chi^B_2$')
ax1.axis([0, 300, -0.05, 0.2])
ax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')
ax1.set_ylabel('$\\chi_2$', fontsize=15, color='black')
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
label.set_fontsize(10)
ax2 = fig.add_subplot(122)
ax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=
'$\\chi^B_4$')
ax2.axis([0, 300, -0.15, 0.2])
ax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')
ax2.set_ylabel('$\\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=
3.0, borderpad=0.5, borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
label.set_fontsize(10)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
wspace=0.2)
fig.savefig('chi.pdf')
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter
import matplotlib.ticker as ticker
import matplotlib as mpl
mpl.style.use('classic')
chi2 = np.loadtxt('Lam0/buffer/chi2.dat')
chi4 = np.loadtxt('Lam0/buffer/chi4.dat')
fig = plt.figure(figsize=(9, 3.5))
ax1 = fig.add_subplot(121)
ax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=
'$\\chi^B_2$')
ax1.axis([0, 300, -0.05, 0.2])
ax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')
ax1.set_ylabel('$\\chi_2$', fontsize=15, color='black')
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
label.set_fontsize(10)
ax2 = fig.add_subplot(122)
ax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=
'$\\chi^B_4$')
ax2.axis([0, 300, -0.15, 0.2])
ax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')
ax2.set_ylabel('$\\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=
3.0, borderpad=0.5, borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
label.set_fontsize(10)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
wspace=0.2)
fig.savefig('chi.pdf')
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import matplotlib.ticker as ticker
import matplotlib as mpl
mpl.style.use('classic')
# Data for plotting
chi2=np.loadtxt(r'Lam0/buffer/chi2.dat')
chi4=np.loadtxt(r'Lam0/buffer/chi4.dat')
# Create figure
fig=plt.figure(figsize=(9, 3.5))
ax1=fig.add_subplot(121)
ax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\chi^B_2$')
ax1.axis([0,300,-0.05,0.2])
ax1.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')
ax1.set_ylabel(r'$\chi_2$', fontsize=15, color='black')
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
label.set_fontsize(10)
# Plot two
ax2=fig.add_subplot(122)
ax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\chi^B_4$')
ax2.axis([0,300,-0.15,0.2])
ax2.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')
ax2.set_ylabel(r'$\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
label.set_fontsize(10)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
wspace=0.2)
fig.savefig("chi.pdf")
#plt.show()
|
flexible
|
{
"blob_id": "66904cbe3e57d9cc1ee385cd8a4c1ba3767626bd",
"index": 923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmpl.style.use('classic')\n<mask token>\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n<mask token>\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-3": "<mask token>\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# sphinx_gallery_thumbnail_number = 3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\n\nmpl.style.use('classic')\n\n\n# Data for plotting\n\n\nchi2=np.loadtxt(r'Lam0/buffer/chi2.dat')\nchi4=np.loadtxt(r'Lam0/buffer/chi4.dat')\n\n\n# Create figure\nfig=plt.figure(figsize=(9, 3.5))\nax1=fig.add_subplot(121)\n\nax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\\chi^B_2$')\n\n\nax1.axis([0,300,-0.05,0.2])\n\nax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel(r'$\\chi_2$', fontsize=15, color='black')\n\n\n\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n# Plot two\nax2=fig.add_subplot(122)\n\nax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\\chi^B_4$')\n\nax2.axis([0,300,-0.15,0.2])\n\nax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel(r'$\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)\n\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\n \n\nfig.savefig(\"chi.pdf\")\n\n#plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
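The record above builds a two-panel susceptibility figure from Lam0/buffer/*.dat files. A self-contained sketch of the same layout, using synthetic stand-in curves so it runs without those data files:

import numpy as np
import matplotlib.pyplot as plt

T = np.linspace(0, 300, 301)            # temperature grid in MeV
chi2 = 0.1 * np.tanh((T - 150) / 40)    # stand-in for Lam0/buffer/chi2.dat
chi4 = 0.1 * np.sin(T / 50)             # stand-in for Lam0/buffer/chi4.dat

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 3.5))
ax1.plot(T, chi2, 'r--', linewidth=2, label=r'$\chi^B_2$')
ax1.set(xlim=(0, 300), ylim=(-0.05, 0.2),
        xlabel=r'$T\,[\mathrm{MeV}]$', ylabel=r'$\chi_2$')
ax2.plot(T, chi4, 'k-', linewidth=2, label=r'$\chi^B_4$')
ax2.set(xlim=(0, 300), ylim=(-0.15, 0.2),
        xlabel=r'$T\,[\mathrm{MeV}]$', ylabel=r'$\chi_4$')
ax2.legend(loc=0, frameon=False)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, wspace=0.2)
fig.savefig('chi.pdf')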
from discord.ext import commands
import discord
import os
import random
bot = commands.Bot(command_prefix="!")
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
@bot.command()
async def ping(ctx):
await ctx.send("pong")
# Lucky command: picks a number between 0 and 50 and spams your DMs that many times
@bot.command()
async def lucky(ctx):
spamCount = random.randint(0, 50)
for num in range(int(spamCount)):
await ctx.message.author.send("ARE YOU FELLING LUCKY???")
# Basic spam command: provide a message and how many times to send it
@bot.command()
async def spam(ctx, spamCtx="spam", spamCount=1):
for num in range(int(spamCount)):
await ctx.send(str(spamCtx))
# Lets you mention a specific user whose DMs you'd like to spam; you can specify a message
@bot.command()
async def attack(ctx, user: discord.User, *, message="GET SPAMMED NERD"):
spamCount = 10
for num in range(int(spamCount)):
await user.send(message)
if __name__ == "__main__":
bot.run(os.environ['TOKEN'])
|
normal
|
{
"blob_id": "b48bc9475a8dc593ba858af8ed4e930ae290fd69",
"index": 6479,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n",
"step-3": "<mask token>\nbot = commands.Bot(command_prefix='!')\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n",
"step-4": "from discord.ext import commands\nimport discord\nimport os\nimport random\nbot = commands.Bot(command_prefix='!')\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n",
"step-5": "from discord.ext import commands\nimport discord\nimport os\nimport random\n\nbot = commands.Bot(command_prefix=\"!\")\n\[email protected]\nasync def on_ready():\n print(f\"Logged in as {bot.user.name}\")\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send(\"pong\")\n\n\n# Lucky command, it picks a number between 0-50 and spams your dm's with that number\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send(\"ARE YOU FELLING LUCKY???\")\n\n# Basic spam command, you can provide a message and specify how many messages\[email protected]()\nasync def spam(ctx, spamCtx=\"spam\", spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n# Lets you mention a specific user who would like to spam in their DM's, you can specify a message\[email protected]()\nasync def attack(ctx, user: discord.User, *, message=\"GET SPAMMED NERD\"):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\nif __name__ == \"__main__\":\n bot.run(os.environ['TOKEN'])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
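The bot record above relies on discord.py's converter machinery: annotating a parameter as discord.User makes the library resolve the raw mention string before the callback runs. A minimal sketch of just that pattern, using the same older commands.Bot API as the record (newer discord.py versions also require an intents argument):

import os
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='!')


@bot.command()
async def whois(ctx, user: discord.User):
    # 'user' arrives already resolved from the mention; no manual ID parsing
    await ctx.send(f'{user.name} (id={user.id})')


if __name__ == '__main__':
    bot.run(os.environ['TOKEN'])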
from extras.plugins import PluginTemplateExtension
from .models import BGPSession
from .tables import BGPSessionTable
class DeviceBGPSession(PluginTemplateExtension):
model = 'dcim.device'
def left_page(self):
if self.context['config'].get('device_ext_page') == 'left':
return self.x_page()
return ''
def right_page(self):
if self.context['config'].get('device_ext_page') == 'right':
return self.x_page()
return ''
def full_width_page(self):
if self.context['config'].get('device_ext_page') == 'full_width':
return self.x_page()
return ''
def x_page(self):
obj = self.context['object']
sess = BGPSession.objects.filter(device=obj)
sess_table = BGPSessionTable(sess)
return self.render('netbox_bgp/device_extend.html', extra_context={
'related_session_table': sess_table})
template_extensions = [DeviceBGPSession]
|
normal
|
{
"blob_id": "be566041402dc1705aa9d644edc44de8792fbb3c",
"index": 4850,
"step-1": "<mask token>\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n <mask token>\n\n def left_page(self):\n if self.context['config'].get('device_ext_page') == 'left':\n return self.x_page()\n return ''\n\n def right_page(self):\n if self.context['config'].get('device_ext_page') == 'right':\n return self.x_page()\n return ''\n <mask token>\n\n def x_page(self):\n obj = self.context['object']\n sess = BGPSession.objects.filter(device=obj)\n sess_table = BGPSessionTable(sess)\n return self.render('netbox_bgp/device_extend.html', extra_context={\n 'related_session_table': sess_table})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n model = 'dcim.device'\n\n def left_page(self):\n if self.context['config'].get('device_ext_page') == 'left':\n return self.x_page()\n return ''\n\n def right_page(self):\n if self.context['config'].get('device_ext_page') == 'right':\n return self.x_page()\n return ''\n\n def full_width_page(self):\n if self.context['config'].get('device_ext_page') == 'full_width':\n return self.x_page()\n return ''\n\n def x_page(self):\n obj = self.context['object']\n sess = BGPSession.objects.filter(device=obj)\n sess_table = BGPSessionTable(sess)\n return self.render('netbox_bgp/device_extend.html', extra_context={\n 'related_session_table': sess_table})\n\n\ntemplate_extensions = [DeviceBGPSession]\n",
"step-4": "from extras.plugins import PluginTemplateExtension\nfrom .models import BGPSession\nfrom .tables import BGPSessionTable\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n model = 'dcim.device'\n\n def left_page(self):\n if self.context['config'].get('device_ext_page') == 'left':\n return self.x_page()\n return ''\n\n def right_page(self):\n if self.context['config'].get('device_ext_page') == 'right':\n return self.x_page()\n return ''\n\n def full_width_page(self):\n if self.context['config'].get('device_ext_page') == 'full_width':\n return self.x_page()\n return ''\n\n def x_page(self):\n obj = self.context['object']\n sess = BGPSession.objects.filter(device=obj)\n sess_table = BGPSessionTable(sess)\n return self.render('netbox_bgp/device_extend.html', extra_context={\n 'related_session_table': sess_table})\n\n\ntemplate_extensions = [DeviceBGPSession]\n",
"step-5": null,
"step-ids": [
1,
4,
7,
8
]
}
|
[
1,
4,
7,
8
] |
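The extension above decides where to render by reading self.context['config'].get('device_ext_page'). A hypothetical NetBox configuration.py fragment that would drive it (plugin and key names assumed from the code above):

PLUGINS = ['netbox_bgp']

PLUGINS_CONFIG = {
    'netbox_bgp': {
        # read by DeviceBGPSession; one of 'left', 'right', 'full_width'
        'device_ext_page': 'left',
    },
}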
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 15:36:38 2021
@author: mav24
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor
from sklearn.metrics import r2_score as r2
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split
"""
Reading the training data
"""
path = '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'
data = pd.read_excel(path)
#data.drop(columns=['Unnamed: 0', 'diesel', 'station wagon'], inplace=True)
drop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders', 'encoded origin']
data.drop(columns=drop, inplace=True)
# Scaling the data with StandardScaler
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
"""
# Outliers Detection
iso = IsolationForest(contamination=0.05)
yhat = iso.fit_predict(X_scaled)
mask = yhat != -1
X_scaled, Y = X_scaled[mask, :], Y[mask]
"""
# Splitting the training data to train and test
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)
# Training and prediction
model = ExtraTreesRegressor()
#model = GradientBoostingRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With Standard Scaler')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
model_for_cross = ExtraTreesRegressor()
#model_for_cross = GradientBoostingRegressor()
cross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring='neg_root_mean_squared_error')
print(f'Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
pipe = Pipeline(steps=[('scaler', StandardScaler()),
('extr', ExtraTreesRegressor(n_jobs=3))])
param_grid = {'extr__n_estimators':[100],
#'extr__criterion':['squared_error', 'mse', 'mae'],
'extr__max_depth':[None, 10, 20, 50, 100, 200, len(X_train)],
#'extr__min_samples_split':[1,2,3,5,10],
#'extr__min_samples_leaf':[1,2,3,5,10],
'extr__max_features':['auto', 'sqrt', 'log2'],
#'extr__max_leaf_nodes':[None, 1,2,3,4,5],
}
grid = GridSearchCV(pipe, param_grid, scoring='r2')
grid.fit(X_train, Y_train)
print(f'Best estimators for ExtraTreesRegressor: {grid.best_estimator_}')
print(f'Best score is: {grid.best_score_}')
"""
"""
# Scaling the data PowerTransformer
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = PowerTransformer()
X_scaled = scaler.fit_transform(X)
# Splitting the training data to train and test
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)
# Training and prediction
model = ExtraTreesRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With PowerTransformer')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
"""
"""
Validate the model on unseen data
"""
#path_val = '/home/mav24/Documents/Development/Regeneration/Project/Data/vavlidation_data.xlsx'
#data_val = pd.read_excel(path_val)
|
normal
|
{
"blob_id": "4a17db6b65e1615b0d519581b3e63bc34ad16093",
"index": 1288,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndata.drop(columns=drop, inplace=True)\n<mask token>\nmodel.fit(X_train, Y_train)\n<mask token>\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\n<mask token>\nprint(\n f\"\"\"Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}\"\"\"\n )\n<mask token>\n",
"step-3": "<mask token>\npath = (\n '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'\n )\ndata = pd.read_excel(path)\ndrop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders',\n 'encoded origin']\ndata.drop(columns=drop, inplace=True)\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)\n<mask token>\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=\n 0.2, random_state=13)\nmodel = ExtraTreesRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\nmodel_for_cross = ExtraTreesRegressor()\ncross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring=\n 'neg_root_mean_squared_error')\nprint(\n f\"\"\"Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}\"\"\"\n )\n<mask token>\n",
"step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor\nfrom sklearn.metrics import r2_score as r2\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split\n<mask token>\npath = (\n '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'\n )\ndata = pd.read_excel(path)\ndrop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders',\n 'encoded origin']\ndata.drop(columns=drop, inplace=True)\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)\n<mask token>\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=\n 0.2, random_state=13)\nmodel = ExtraTreesRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\nmodel_for_cross = ExtraTreesRegressor()\ncross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring=\n 'neg_root_mean_squared_error')\nprint(\n f\"\"\"Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}\"\"\"\n )\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 15 15:36:38 2021\n\n@author: mav24\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler\n\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor\nfrom sklearn.metrics import r2_score as r2\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import mean_absolute_error as mae\n\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split\n\n\n\"\"\"\nReading the training data\n\"\"\"\npath = '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'\ndata = pd.read_excel(path)\n\n#data.drop(columns=['Unnamed: 0', 'diesel', 'station wagon'], inplace=True)\ndrop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders', 'encoded origin']\ndata.drop(columns=drop, inplace=True)\n\n\n# Scaling the data Standar sceler\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform(X)\n\n\"\"\"\n# Outliers Detection\niso = IsolationForest(contamination=0.05)\nyhat = iso.fit_predict(X_scaled)\n\nmask = yhat != -1\nX_scaled, Y = X_scaled[mask, :], Y[mask]\n\"\"\"\n\n# Splitting the training data to train and test\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)\n\n\n\n# Training and prediction\nmodel = ExtraTreesRegressor()\n#model = GradientBoostingRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\n\nprint('With Standar Scaler')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\n\n\n\n\n\nmodel_for_cross = ExtraTreesRegressor()\n#model_for_cross = GradientBoostingRegressor()\ncross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring='neg_root_mean_squared_error')\nprint(f'Cross validation is: {cross_val} \\n and mean: {np.mean(cross_val)} \\n and std:{np.std(cross_val)}')\n\n\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\npipe = Pipeline(steps=[('scaler', StandardScaler()),\n ('extr', ExtraTreesRegressor(n_jobs=3))])\n\nparam_grid = {'extr__n_estimators':[100],\n #'extr__criterion':['squared_error', 'mse', 'mae'],\n 'extr__max_depth':[None, 10, 20, 50, 100, 200, len(X_train)],\n #'extr__min_samples_split':[1,2,3,5,10],\n #'extr__min_samples_leaf':[1,2,3,5,10],\n 'extr__max_features':['auto', 'sqrt', 'log2'],\n #'extr__max_leaf_nodes':[None, 1,2,3,4,5],\n }\n\ngrid = GridSearchCV(pipe, param_grid, scoring='r2')\ngrid.fit(X_train, Y_train)\nprint(f'Best estimators for ExtraTreesRegressor: {grid.best_estimator_}')\nprint(f'Best score is: {grid.best_score_}')\n\"\"\"\n\n\n\"\"\"\n\n# Scaling the data PowerTransformer\nX = data.drop(columns='mpg')\nY = data['mpg']\nscaler = PowerTransformer()\nX_scaled = scaler.fit_transform(X)\n\n\n# Splitting the training data to train and test\nX_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)\n\n\n\n# Training and prediction\nmodel = ExtraTreesRegressor()\nmodel.fit(X_train, Y_train)\npred_test = model.predict(X_test)\n\n\nprint('With PowerTransformer')\nprint(f'The R2 accuracy is: {r2(Y_test, pred_test)}')\nprint(f'The mean square error 
is: {mse(Y_test, pred_test)}')\nprint(f'Mean absolute error is: {mae(Y_test, pred_test)}')\n\n\"\"\"\n\n\"\"\"\nValidate the model to unseen data\n\"\"\"\n\n#path_val = '/home/mav24/Documents/Development/Regeneration/Project/Data/vavlidation_data.xlsx'\n#data_val = pd.read_excel(path_val)\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
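The record above leaves its IsolationForest outlier filter commented out. A self-contained sketch of that masking idea on synthetic data (shapes and contamination value chosen only for illustration):

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(13)
X = rng.normal(size=(200, 5))
y = rng.normal(size=200)

iso = IsolationForest(contamination=0.05, random_state=13)
mask = iso.fit_predict(X) != -1        # fit_predict marks outliers with -1
X_clean, y_clean = X[mask], y[mask]
print(f'kept {mask.sum()} of {len(mask)} rows')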
<|reserved_special_token_0|>
<|reserved_special_token_1|>
if answ[1] == 'дата':  # 'дата' is Russian for 'date'
apisay(datetime.date.today(), toho, torep)
|
flexible
|
{
"blob_id": "66444047f9e5eea845c8ac2dbaaf16fc2914d6ec",
"index": 370,
"step-1": "<mask token>\n",
"step-2": "if answ[1] == 'дата':\n apisay(datetime.date.today(), toho, torep)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
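The fragment above is a single dispatch branch from a larger bot; answ, apisay, toho and torep are defined elsewhere. A self-contained sketch with stand-ins for those names:

import datetime


def apisay(text, chat_id, reply_to):
    # stand-in for the bot's send helper
    print(f'-> {chat_id} (reply {reply_to}): {text}')


answ = ['!', 'дата']   # 'дата' is Russian for 'date'
toho, torep = 42, None

if answ[1] == 'дата':
    apisay(datetime.date.today(), toho, torep)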
import multiprocessing
import sys
import warnings
from pathlib import Path
import attr
import librosa
import pandas as pd
from rich.progress import BarColumn, Progress, TimeRemainingColumn
from sklearn.preprocessing import LabelEncoder
from tslearn.piecewise import SymbolicAggregateApproximation
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
import utils
if not sys.warnoptions:
warnings.simplefilter("ignore")
@attr.s
class MusicDB(object):
df = attr.ib()
feat = attr.ib()
sax = attr.ib()
# start of private methods
@feat.default
def _feat_default(self):
our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False)
miao = our_feat[[("track", "genre_top")]]
miao = miao.loc[self.df.index]
miao.columns = ["genre"]
le = LabelEncoder()
label_encoders = dict()
column2encode = [("genre")]
for col in column2encode:
le = LabelEncoder()
miao["enc_genre"] = le.fit_transform(miao[col])
label_encoders[col] = le
return miao
@df.default
def _dataframe_default(self):
pick = self._dataframe_pickleload()
if type(pick) is not bool:
return pick
# if not, populate
return self._dataframe_populate()
@sax.default
def _saxdf_default(self):
segments = 130
scaler = TimeSeriesScalerMeanVariance()
musi_scaled = pd.DataFrame(
scaler.fit_transform(self.df.values).reshape(
self.df.values.shape[0], self.df.values.shape[1]
)
)
musi_scaled.index = self.df.index
sax = SymbolicAggregateApproximation(n_segments=segments, alphabet_size_avg=20)
ts_sax = sax.fit_transform(musi_scaled)
miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0], segments))
miaoooooo.index = self.df.index
return miaoooooo
def _dataframe_pickleload(self):
path_to_pickle = Path("data/picks/small.pkl")
try:
pipi = pd.read_pickle(path_to_pickle)
except FileNotFoundError:
return False
return pipi
def _dataframe_populate(self):
        # establish the number of features using the main song
y, sr = librosa.load("data/music/000/000002.mp3", sr=None)
miao = librosa.resample(y, sr, 90)
number_of_feat = len(miao)
# make df
print(f"Building a dataframe with {number_of_feat} features.")
dfm = pd.DataFrame(columns=list(range(number_of_feat)))
num_errors = 0
# populate collection of paths of mp3s
p = Path("data/music").glob("**/*.mp3")
tracks = [x for x in p if x.is_file()]
print(f"Making a Dataframe of len {len(tracks)}.")
# make progress reporting
progress = Progress(
"[progress.description]{task.description}",
BarColumn(),
"{task.completed} of {task.total}",
"[progress.percentage]{task.percentage:>3.0f}%",
TimeRemainingColumn(),
)
# populate df
with progress:
task_id = progress.add_task("[cyan]Extracting...", total=len(tracks))
with multiprocessing.Pool() as pool:
for row in pool.imap_unordered(self._do_one_song, tracks):
if type(row) is not bool:
dfm = dfm.append(row)
else:
num_errors += 1
progress.advance(task_id)
dfm = dfm.sort_index()
        # ensure the shape matches that of the main song
dfm = dfm.loc[:, : number_of_feat - 1]
print(f"There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.")
print(f"There also were {num_errors} errors.")
dfm = dfm.fillna(value=0)
dfm.to_pickle("data/picks/small.pkl")
return dfm
def _do_one_song(self, song):
# extract waveform and convert
try:
y, sr = librosa.load(str(song), sr=None)
miao = librosa.resample(y, sr, 120)
# fix the index
miao = pd.Series(data=miao)
miao.name = int(song.stem)
return miao
        except Exception:  # loading/resampling can fail on unreadable files
return False
if __name__ == "__main__":
music = MusicDB()
# some printing just to understand how this works
print(music.df.info())
print(music.df.head())
|
normal
|
{
"blob_id": "0e57e25c11ba97aef5467f61d99065609e127f5b",
"index": 2782,
"step-1": "<mask token>\n\n\[email protected]\nclass MusicDB(object):\n <mask token>\n <mask token>\n <mask token>\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n <mask token>\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n <mask token>\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\nclass MusicDB(object):\n <mask token>\n <mask token>\n <mask token>\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n <mask token>\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path('data/picks/small.pkl')\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n\n def _do_one_song(self, song):\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]\nclass MusicDB(object):\n df = attr.ib()\n feat = attr.ib()\n sax = attr.ib()\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n\n @df.default\n def _dataframe_default(self):\n pick = self._dataframe_pickleload()\n if type(pick) is not bool:\n return pick\n return self._dataframe_populate()\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path('data/picks/small.pkl')\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n\n def _do_one_song(self, song):\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\n<mask token>\n",
"step-4": "<mask token>\nif not sys.warnoptions:\n warnings.simplefilter('ignore')\n\n\[email protected]\nclass MusicDB(object):\n df = attr.ib()\n feat = attr.ib()\n sax = attr.ib()\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n\n @df.default\n def _dataframe_default(self):\n pick = self._dataframe_pickleload()\n if type(pick) is not bool:\n return pick\n return self._dataframe_populate()\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path('data/picks/small.pkl')\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n\n def _do_one_song(self, song):\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\nif __name__ == '__main__':\n music = MusicDB()\n print(music.df.info())\n print(music.df.head())\n",
"step-5": "import multiprocessing\nimport sys\nimport warnings\nfrom pathlib import Path\n\nimport attr\nimport librosa\nimport pandas as pd\nfrom rich.progress import BarColumn, Progress, TimeRemainingColumn\nfrom sklearn.preprocessing import LabelEncoder\nfrom tslearn.piecewise import SymbolicAggregateApproximation\nfrom tslearn.preprocessing import TimeSeriesScalerMeanVariance\n\nimport utils\n\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n\n\[email protected]\nclass MusicDB(object):\n df = attr.ib()\n feat = attr.ib()\n sax = attr.ib()\n\n # start of private methods\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False)\n miao = our_feat[[(\"track\", \"genre_top\")]]\n miao = miao.loc[self.df.index]\n miao.columns = [\"genre\"]\n\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = [(\"genre\")]\n for col in column2encode:\n le = LabelEncoder()\n miao[\"enc_genre\"] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n\n @df.default\n def _dataframe_default(self):\n pick = self._dataframe_pickleload()\n if type(pick) is not bool:\n return pick\n # if not, populate\n return self._dataframe_populate()\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(\n scaler.fit_transform(self.df.values).reshape(\n self.df.values.shape[0], self.df.values.shape[1]\n )\n )\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments, alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0], segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path(\"data/picks/small.pkl\")\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n # estabilish number of features using the main song\n y, sr = librosa.load(\"data/music/000/000002.mp3\", sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n\n # make df\n print(f\"Building a dataframe with {number_of_feat} features.\")\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n\n # populate collection of paths of mp3s\n p = Path(\"data/music\").glob(\"**/*.mp3\")\n tracks = [x for x in p if x.is_file()]\n print(f\"Making a Dataframe of len {len(tracks)}.\")\n\n # make progress reporting\n progress = Progress(\n \"[progress.description]{task.description}\",\n BarColumn(),\n \"{task.completed} of {task.total}\",\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n )\n\n # populate df\n with progress:\n task_id = progress.add_task(\"[cyan]Extracting...\", total=len(tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n\n dfm = dfm.sort_index()\n # ensure the shape is the one of the main song\n dfm = dfm.loc[:, : number_of_feat - 1]\n print(f\"There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.\")\n print(f\"There also were {num_errors} errors.\")\n dfm = dfm.fillna(value=0)\n dfm.to_pickle(\"data/picks/small.pkl\")\n return dfm\n\n def _do_one_song(self, song):\n # extract waveform and convert\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n 
# fix the index\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\nif __name__ == \"__main__\":\n music = MusicDB()\n # some printing just to understand how this works\n print(music.df.info())\n print(music.df.head())\n",
"step-ids": [
4,
6,
8,
9,
11
]
}
|
[
4,
6,
8,
9,
11
] |
import sys
import time
import numpy
import pb_robot
import pyquaternion
import pybullet as p
from copy import deepcopy
from actions import PlaceAction, make_platform_world
from block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, \
Quaternion, get_rotated_block, Pose, add_noise, \
Environment, Position, World
from pddlstream.utils import INF
from pybullet_utils import transformation
import tamp.primitives
from tamp.misc import setup_panda_world, get_pddl_block_lookup, \
print_planning_problem, ExecuteActions, ExecutionFailure
from tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan
class PandaAgent:
def __init__(self, blocks, noise=0.00005, block_init_xy_poses=None,
use_platform=False, use_vision=False, real=False,
use_planning_server=False, use_learning_server=False,
alternate_orientations=False):
"""
Build the Panda world in PyBullet and set up the PDDLStream solver.
        The Panda world should include the given blocks as well as a
platform which can be used in experimentation.
:param use_platform: Boolean stating whether to include the platform to
push blocks off of or not.
:param use_vision: Boolean stating whether to use vision to detect blocks.
:param use_planning_server: Boolean stating whether to use the separate
ROS planning service server.
:param use_learning_server: Boolean stating whether to host a ROS service
server to drive planning from active learning script.
:param alternate_orientations: Boolean stating whether blocks can be replaced in
their home positions at alternate orientations.
If you are using the ROS action server, you must start it in a separate terminal:
rosrun stacking_ros planning_server.py
"""
self.real = real
self.use_vision = use_vision
self.use_platform = use_platform
self.use_planning_server = use_planning_server
self.use_learning_server = use_learning_server
self.alternate_orientations = alternate_orientations
# Setup PyBullet instance to run in the background and handle planning/collision checking.
self._planning_client_id = pb_robot.utils.connect(use_gui=False)
self.plan()
pb_robot.utils.set_default_camera()
self.robot = pb_robot.panda.Panda()
self.robot.arm.hand.Open()
self.belief_blocks = blocks
self.pddl_blocks, self.platform_table, self.platform_leg, self.table, self.frame, self.wall = setup_panda_world(self.robot,
blocks,
block_init_xy_poses,
use_platform=use_platform)
self.fixed = [self.platform_table, self.platform_leg, self.table, self.frame, self.wall]
self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks)
self.orig_joint_angles = self.robot.arm.GetJointValues()
self.orig_block_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
# Setup PyBullet instance that only visualizes plan execution. State needs to match the planning instance.
poses = [b.get_base_link_pose() for b in self.pddl_blocks]
poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]
self._execution_client_id = pb_robot.utils.connect(use_gui=True)
self.execute()
pb_robot.utils.set_default_camera()
self.execution_robot = pb_robot.panda.Panda()
self.execution_robot.arm.hand.Open()
setup_panda_world(self.execution_robot, blocks, poses, use_platform=use_platform)
# Set up ROS plumbing if using features that require it
if self.use_vision or self.use_planning_server or self.use_learning_server or real:
import rospy
try:
rospy.init_node("panda_agent")
except:
print("ROS Node already created")
# Create an arm interface
if real:
from franka_interface import ArmInterface
self.real_arm = ArmInterface()
from franka_core_msgs.msg import RobotState
state_topic = "/franka_ros_interface/custom_franka_state_controller/robot_state"
self.arm_last_error_time = time.time()
self.arm_error_check_time = 3.0
self.arm_state_subscriber = rospy.Subscriber(
state_topic, RobotState, self.robot_state_callback)
# Set initial poses of all blocks and setup vision ROS services.
if self.use_vision:
from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
rospy.wait_for_service('get_block_poses_world')
rospy.wait_for_service('get_block_poses_wrist')
self._get_block_poses_world = rospy.ServiceProxy('get_block_poses_world', GetBlockPosesWorld)
self._get_block_poses_wrist = rospy.ServiceProxy('get_block_poses_wrist', GetBlockPosesWrist)
# Start ROS clients and servers as needed
self.last_obj_held = None
if self.use_planning_server:
from stacking_ros.srv import GetPlan, SetPlanningState
from tamp.ros_utils import goal_to_ros, ros_to_task_plan
print("Waiting for planning server...")
rospy.wait_for_service("get_latest_plan")
self.goal_to_ros = goal_to_ros
self.ros_to_task_plan = ros_to_task_plan
self.init_state_client = rospy.ServiceProxy(
"/reset_planning", SetPlanningState)
self.get_plan_client = rospy.ServiceProxy(
"/get_latest_plan", GetPlan)
print("Done!")
if self.use_learning_server:
from stacking_ros.srv import PlanTower
self.learning_server = rospy.Service(
"/plan_tower", PlanTower, self.learning_server_callback)
print("Learning server started!")
self.pddl_info = get_pddlstream_info(self.robot,
self.fixed,
self.pddl_blocks,
add_slanted_grasps=False,
approach_frame='global',
use_vision=self.use_vision)
self.noise = noise
self.txt_id = None
self.plan()
def _add_text(self, txt):
self.execute()
pb_robot.viz.remove_all_debug()
self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75), size=2)
self.plan()
def execute(self):
self.state = 'execute'
pb_robot.aabb.set_client(self._execution_client_id)
pb_robot.body.set_client(self._execution_client_id)
pb_robot.collisions.set_client(self._execution_client_id)
pb_robot.geometry.set_client(self._execution_client_id)
pb_robot.grasp.set_client(self._execution_client_id)
pb_robot.joint.set_client(self._execution_client_id)
pb_robot.link.set_client(self._execution_client_id)
pb_robot.panda.set_client(self._execution_client_id)
pb_robot.planning.set_client(self._execution_client_id)
pb_robot.utils.set_client(self._execution_client_id)
pb_robot.viz.set_client(self._execution_client_id)
def plan(self):
if self.use_planning_server:
return
self.state = 'plan'
pb_robot.aabb.set_client(self._planning_client_id)
pb_robot.body.set_client(self._planning_client_id)
pb_robot.collisions.set_client(self._planning_client_id)
pb_robot.geometry.set_client(self._planning_client_id)
pb_robot.grasp.set_client(self._planning_client_id)
pb_robot.joint.set_client(self._planning_client_id)
pb_robot.link.set_client(self._planning_client_id)
pb_robot.panda.set_client(self._planning_client_id)
pb_robot.planning.set_client(self._planning_client_id)
pb_robot.utils.set_client(self._planning_client_id)
pb_robot.viz.set_client(self._planning_client_id)
def reset_world(self):
""" Resets the planning world to its original configuration """
print("Resetting world")
if self.real:
angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
angles = self.orig_joint_angles
self.plan()
self.robot.arm.SetJointValues(angles)
self.execute()
self.execution_robot.arm.SetJointValues(angles)
for bx, b in enumerate(self.pddl_blocks):
b.set_base_link_pose(self.orig_block_poses[bx])
print("Done")
def _get_initial_pddl_state(self):
"""
Get the PDDL representation of the world between experiments. This
method assumes that all blocks are on the table. We will always "clean
up" an experiment by moving blocks away from the platform after an
experiment.
"""
fixed = [self.table, self.platform_table, self.platform_leg, self.frame]
conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.GetJointValues())
print('Initial configuration:', conf.configuration)
init = [('CanMove',),
('Conf', conf),
('StartConf', conf),
('AtConf', conf),
('HandEmpty',)]
self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.get_base_link_pose())
init += [('Pose', self.table, self.table_pose),
('AtPose', self.table, self.table_pose)]
for body in self.pddl_blocks:
print(type(body), body)
pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())
init += [('Graspable', body),
('Pose', body, pose),
('AtPose', body, pose),
('Block', body),
('On', body, self.table),
('Supported', body, pose, self.table, self.table_pose)]
if not self.platform_table is None:
platform_pose = pb_robot.vobj.BodyPose(self.platform_table, self.platform_table.get_base_link_pose())
init += [('Pose', self.platform_table, platform_pose),
('AtPose', self.platform_table, platform_pose)]
init += [('Block', self.platform_table)]
init += [('Table', self.table)]
return init
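    # Sketch (illustrative, not exhaustive): for a single hypothetical block
    # `blockA`, the fact set returned above looks roughly like
    #   [('CanMove',), ('Conf', conf), ('StartConf', conf), ('AtConf', conf),
    #    ('HandEmpty',),
    #    ('Pose', table, table_pose), ('AtPose', table, table_pose),
    #    ('Graspable', blockA), ('Pose', blockA, poseA), ('AtPose', blockA, poseA),
    #    ('Block', blockA), ('On', blockA, table),
    #    ('Supported', blockA, poseA, table, table_pose),
    #    ('Table', table)]
    # and is consumed by PDDLStream as the initial state.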
def _get_observed_pose(self, pddl_block, action):
"""
This pose should be relative to the base of the platform leg to
agree with the simulation. The two block representations will have
        different orientations, but their positions should be the same.
"""
block_transform = pddl_block.get_base_link_transform()
platform_transform = self.platform_leg.get_base_link_transform()
platform_transform[2,3] -= self.platform_leg.get_dimensions()[2]/2.
rel_transform = numpy.linalg.inv(platform_transform)@block_transform
end_pose = pb_robot.geometry.pose_from_tform(rel_transform)
# TODO: Add noise to the observation.
end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))
end_pose = add_noise(end_pose, self.noise*numpy.eye(3))
return end_pose
def _update_block_poses(self, find_moved=False):
""" Use the global world cameras to update the positions of the blocks """
try:
resp = self._get_block_poses_world()
named_poses = resp.poses
except:
import sys
print('Service call to get block poses failed. Exiting.')
sys.exit()
n_found = 0
for pddl_block_name, pddl_block in self.pddl_block_lookup.items():
for named_pose in named_poses:
if named_pose.block_id == pddl_block_name.split('_')[-1]:
pose = named_pose.pose.pose
                    # Skip changing the pose of objects in storage.
if pose.position.x < 0.05:
continue
n_found += 1
position = (pose.position.x, pose.position.y, pose.position.z)
orientation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)
self.execute()
pddl_block.set_base_link_pose((position, orientation))
if not self.use_planning_server:
self.plan()
pddl_block.set_base_link_pose((position, orientation))
if find_moved and n_found != len(self.moved_blocks):
input('Could not find all the moved blocks. Please reposition blocks outside of the camera view and hit enter to continue.')
self._update_block_poses(find_moved=True)
return
# After loading from vision, objects may be in collision. Resolve this.
for _, pddl_block in self.pddl_block_lookup.items():
if pb_robot.collisions.body_collision(pddl_block, self.table):
print('Collision with table and block:', pddl_block.readableName)
position, orientation = pddl_block.get_base_link_pose()
stable_z = pb_robot.placements.stable_z(pddl_block, self.table)
position = (position[0], position[1], stable_z)
self.execute()
pddl_block.set_base_link_pose((position, orientation))
self.plan()
pddl_block.set_base_link_pose((position, orientation))
# Resolve from low to high blocks.
current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
block_ixs = range(len(self.pddl_blocks))
block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=False)
for ix in range(len(block_ixs)):
bottom_block = self.pddl_blocks[block_ixs[ix]]
for jx in range(ix+1, len(block_ixs)):
top_block = self.pddl_blocks[block_ixs[jx]]
dist_moved = 0
while pb_robot.collisions.body_collision(bottom_block, top_block):
print('Collision with bottom %s and top %s:' % (bottom_block.readableName, top_block.readableName))
position, orientation = top_block.get_base_link_pose()
stable_z = position[2] + 0.001
dist_moved += 0.001
if self.real and dist_moved > 0.04:
print(f"Found blocks {bottom_block} and {top_block} in collision")
input("Manually move the blocks and press Enter to continue")
self._update_block_poses(find_moved=False)
return
position = (position[0], position[1], stable_z)
self.execute()
top_block.set_base_link_pose((position, orientation))
self.plan()
top_block.set_base_link_pose((position, orientation))
def build_planning_problem(self, tower, base_xy):
""" Builds the initial conditions for planning """
# Set up the list of original poses and order of blocks in the tower
self.moved_blocks = set()
tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]
tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]
# Build the initial data structures
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
# Initial poses and robot configuration
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
else:
pddl_problems = []
# Base block goal pose
# TODO: Set base block to be rotated in its current position.
base_block = self.pddl_block_lookup[tower[0].name]
base_pos = (base_xy[0], base_xy[1], tower[0].pose.pos.z)
base_pose = (base_pos, tower[0].rotation)
base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)
if self.use_planning_server:
base_block_ros = BodyInfo()
base_block_ros.name = base_block.readableName
base_block_ros.stack = True
pose_to_ros(base_pose, base_block_ros.pose)
ros_req.goal_state.append(base_block_ros)
else:
pddl_problems.append((self.table, base_block, (base_pos, tower[0].rotation)))
# Other block goal poses
for b_ix in range(1, len(tower)):
bottom_block = tower[b_ix-1]
bottom_pose = (bottom_block.pose.pos, bottom_block.rotation)
bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)
top_block = tower[b_ix]
top_pose = (top_block.pose.pos, top_block.rotation)
top_tform = pb_robot.geometry.tform_from_pose(top_pose)
rel_tform = numpy.linalg.inv(bottom_tform)@top_tform
top_pddl = self.pddl_block_lookup[top_block.name]
bottom_pddl = self.pddl_block_lookup[bottom_block.name]
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = top_pddl.readableName
block_ros.base_obj = bottom_pddl.readableName
transform_to_ros(rel_tform, block_ros.pose)
block_ros.is_rel_pose = True
block_ros.stack = True
ros_req.goal_state.append(block_ros)
else:
init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]
goal_terms = [('On', top_pddl, bottom_pddl)]
pddl_problems.append((bottom_pddl, top_pddl, rel_tform))
# Finally, tack on the tower resetting steps
for ix in reversed(tower_block_order):
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
# Return the planning data structure
if self.use_planning_server:
return ros_req
else:
return pddl_problems
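    # Sketch of the relative-pose math above, with made-up numbers: if the
    # bottom block sits at z=0.1 and the top at z=0.2 (identity rotations),
    #   bottom_tform = pb_robot.geometry.tform_from_pose(((0., 0., 0.1), (0., 0., 0., 1.)))
    #   top_tform = pb_robot.geometry.tform_from_pose(((0., 0., 0.2), (0., 0., 0., 1.)))
    #   rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform
    # gives a transform with translation (0, 0, 0.1): the top block's pose
    # expressed in the bottom block's frame, which is what the planner stores.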
def build_reset_problem(self):
""" Builds the initial conditions for a tower reset given a set of moved blocks """
print("Resetting blocks...")
print("Moved Blocks:", self.moved_blocks)
# Define block order by sorting by height
current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
block_ixs = range(len(self.pddl_blocks))
block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=True)
# Build the initial data structures
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
else:
pddl_problems = []
# Add all blocks to be moved to the data structure
for ix in block_ixs:
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
if blk in self.moved_blocks:
if self.use_planning_server:
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
# Return the planning data structure
if self.use_planning_server:
return ros_req
else:
return pddl_problems
def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0., 0.5), ignore_resets=False):
"""
        Simulates stacking and unstacking a tower by requesting plans from a separate planning server
Returns:
success : Flag indicating success of execution (True/False)
            stable : Flag indicating tower stability (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
for block in tower:
print('Block:', block.name)
print('Pose:', block.pose)
print('Dims:', block.dimensions)
print('CoM:', block.com)
print('Rotations:', block.rotation)
print('-----')
if self.use_vision:
self._update_block_poses()
self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
planning_prob = self.build_planning_problem(tower, base_xy)
# Execute the stacking plan
success, stack_stable, reset_stable, num_success, fatal = \
self.plan_and_execute(planning_prob, real, T, stack=True, ignore_resets=ignore_resets)
print(f"Completed tower stack with success: {success}, stable: {stack_stable}")
if reset_stable:
print(f"Completed tower reset stable: {reset_stable}")
# If we have a nonfatal failure, replan from new state, removing successful goals
while (not success and not fatal):
print(f"Got recoverable failure. Replanning from step index {num_success}.")
if self.use_planning_server:
from tamp.ros_utils import block_init_to_ros
if self.real:
planning_prob.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
planning_prob.robot_config.angles = self.robot.arm.GetJointValues()
planning_prob.init_state = block_init_to_ros(self.pddl_blocks)
if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):
planning_prob.held_block.name = self.last_obj_held.body.readableName
transform_to_ros(self.last_obj_held.grasp_objF, planning_prob.held_block.pose)
success, stack_stable, reset_stable, num_success, fatal = \
self.plan_and_execute(planning_prob, real, T, stack=True, start_idx=num_success, ignore_resets=ignore_resets)
print(f"Completed tower stack with success: {success}, stable: {stack_stable}")
if reset_stable:
print(f"Completed tower reset stable: {reset_stable}")
# Write the number of successfully stacked blocks
num_stack_success = min(len(tower), num_success)
# If the full tower did not succeed, reset the moved blocks
if not ignore_resets:
try:
if not (stack_stable and reset_stable):
if self.use_vision and not stack_stable:
self._update_block_poses(find_moved=True)
# TODO: Return arm to home position to help with vision.
planning_prob = self.build_reset_problem()
reset_fatal = False
num_reset_success = 0
while len(self.moved_blocks) > 0 and not reset_fatal:
print(f"Resetting {len(self.moved_blocks)} blocks.")
reset_success, _, reset_stable, num_reset_success, reset_fatal = \
self.plan_and_execute(planning_prob, real, T, stack=False, start_idx=num_reset_success)
except Exception as e:
print("Planning/execution failed during tower reset.")
print(e)
# Return the final planning state
return success, stack_stable, num_stack_success
def plan_and_execute(self, planning_prob, real=False, T=2500, stack=True, start_idx=0, ignore_resets=False):
"""
        Requests a PDDLStream plan from a planning server and executes the resulting plan
        Args:
            start_idx : Start index of planning (for recovering from partial plans)
            ignore_resets : Flag for whether to return after the stacking phase, skipping resets
        Returns:
            success : Flag for whether the plan execution succeeded
            stack_stable : Flag for whether stacking a stable tower was successful
            reset_stable : Flag for whether resetting a tower was successful
            num_success : Progress (in number of steps) of successful tasks
            fatal : Flag for whether the error was fatal (True) or recoverable (False)
"""
# Initialize variables
num_success = start_idx
stack_stable = False
reset_stable = False
planning_active = True
if self.use_planning_server:
# Send a reset request to the planning server
ros_req = planning_prob
num_steps = len(ros_req.goal_state)
trimmed_ros_req = deepcopy(ros_req)
trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]
self.init_state_client.call(trimmed_ros_req)
else:
pddl_problems = planning_prob
num_steps = len(pddl_problems)
while num_success < num_steps:
try:
# PLANNING
# If using planning server, request a plan from the server using ROS
if self.use_planning_server:
query_block = self.pddl_block_lookup[ros_req.goal_state[num_success].name]
# Wait for a valid plan
plan = []
saved_world = pb_robot.utils.WorldSaver()
while len(plan) == 0 and planning_active:
time.sleep(5)
print("Getting a plan from server...")
ros_resp = self.get_plan_client.call()
if not ros_resp.planning_active:
print("Planning failed on server side.")
# If failure happened during stacking, it is a fatal failure
if (ros_req.goal_state[num_success].stack):
print(f"Failed during stacking {query_block}")
fatal = True
# If failure happened during resetting, prompt user to manually reset blocks
else:
print(f"Failed during resetting {query_block}")
input("Manually reset the blocks and press Enter to continue")
if real:
self._update_block_poses()
fatal = False
return False, stack_stable, reset_stable, num_success, fatal
if self.validate_ros_plan(ros_resp, query_block):
plan = self.ros_to_task_plan(ros_resp, self.execution_robot, self.pddl_block_lookup)
# Otherwise, plan locally
else:
base, blk, pose = pddl_problems[num_success]
query_block = blk
self._add_text('Planning block placement')
self.plan()
saved_world = pb_robot.utils.WorldSaver()
self.robot.arm.hand.Open()
# Unpack initial conditions
fixed_objs = self.fixed + [b for b in self.pddl_blocks if b != blk]
init = self._get_initial_pddl_state()
goal_terms = []
if base == self.table:
blk_pose = pb_robot.vobj.BodyPose(blk, pose)
if (not stack or num_success >= num_steps/2) and self.alternate_orientations:
init += [("Reset",)]
goal_terms.append(("AtHome", blk))
else:
init += [('Pose', blk, blk_pose),
('Supported', blk, blk_pose, self.table, self.table_pose)]
goal_terms.append(('AtPose', blk, blk_pose))
goal_terms.append(('On', blk, self.table))
else:
init += [('RelPose', blk, base, pose)]
goal_terms.append(('On', blk, base))
goal = tuple(['and'] + goal_terms)
# Plan with PDDLStream
pddl_info = get_pddlstream_info(self.robot,
fixed_objs,
self.pddl_blocks,
add_slanted_grasps=True,
approach_frame='global',
use_vision=self.use_vision,
home_pose=pose)
plan, cost = pddlstream_plan(pddl_info, init, goal,
search_sample_ratio=1.0,
max_time=INF)
if plan is None:
print("\nFailed to plan\n")
fatal = False
return False, stack_stable, reset_stable, num_success, fatal
saved_world.restore()
print("\nGot plan:")
print(plan)
# Once we have a plan, execute it
obstacles = [f for f in self.fixed if f is not None]
if not self.use_planning_server:
self.plan()
ExecuteActions(plan, real=False, pause=False, wait=False, obstacles=obstacles)
self.execute()
ExecuteActions(plan, real=real, pause=True, wait=False, prompt=False, obstacles=obstacles,
sim_fatal_failure_prob=0.0, sim_recoverable_failure_prob=0.0)
# Manage the moved blocks (add to the set when stacking, remove when unstacking)
desired_pose = query_block.get_base_link_pose()
if query_block not in self.moved_blocks:
self.moved_blocks.add(query_block)
else:
self.moved_blocks.remove(query_block)
# Check stability
if not real:
self.step_simulation(T, vis_frames=False)
#input('Press enter to check stability.')
if stack:
stable = self.check_stability(real, query_block, desired_pose)
else:
stable = True # Don't care about stability on reset
if stable == 0.:
prompt = input('Tower NOT stable. Is this true? [y: Unstable / n: Stable]')
if prompt == 'n':
stable = 1.
#input('Continue?')
# Manage the success status of the plan
if stable == 0.:
print("Unstable after execution!")
return True, stack_stable, reset_stable, num_success, False
else:
num_success += 1
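                    # As built by build_planning_problem, the goal list holds
                    # the stacking goals followed by an equal number of reset
                    # goals, so num_steps/2 marks the end of the stacking phase.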
if stack and num_success == num_steps/2:
print("Completed tower stack!")
stack_stable = True
stack = False
if ignore_resets:
return True, stack_stable, reset_stable, num_success, False
elif num_success == num_steps:
print("Completed tower reset!")
reset_stable = True
return True, stack_stable, reset_stable, num_success, False
except ExecutionFailure as e:
print("Planning/execution failed.")
print(e)
saved_world.restore()
if real:
self._update_block_poses()
self.robot.arm.SetJointValues(self.real_arm.convertToList(self.real_arm.joint_angles()))
self.last_obj_held = e.obj_held
return False, stack_stable, reset_stable, num_success, e.fatal
def check_stability(self, real, block_pddl, desired_pose, max_tries=2):
if self.use_vision:
# Get pose of blocks using wrist camera.
try:
poses = self._get_block_poses_wrist().poses
except:
print('Service call to get block poses failed during check stability. Exiting.')
sys.exit()
# Check if pose is close to desired_pose.
visible = False
for named_pose in poses:
if named_pose.block_id in block_pddl.readableName.split('_')[-1]:
visible = True
pose = named_pose.pose.pose
des_pos = desired_pose[0]
obs_pos = (pose.position.x, pose.position.y, pose.position.z)
print('[Check Stability] Desired Pos:', des_pos)
print('[Check Stability] Detected Pos:', obs_pos)
# First check if the pose is too far away.
dist = numpy.linalg.norm(numpy.array(obs_pos)-numpy.array(des_pos))
print(f'[Check Stability] Position Distance (>0.04): {dist}')
if dist > 0.04:
return 0.
# Also check that the block is flat on the table.
orn = desired_pose[1]
obs_orn = pyquaternion.Quaternion(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z)
des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1], orn[2])
angle = (des_orn.inverse*obs_orn).angle
angle = numpy.abs(numpy.rad2deg(angle))
print(f'[Check Stability] Orientation Distance (> 15): {angle}')
if angle > 15:
return 0.
# If block isn't visible, return 0.
if not visible:
print('[Check Stability] Object not visible to camera.')
return 0.
else:
end_pose = block_pddl.get_base_link_point()
dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(desired_pose[0]))
# print(f"Distance is {dist}")
# print(f"Block dimensions are {block_pddl.get_dimensions()}")
if dist > 0.01:
print('Unstable!')
return 0.
return 1.
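    # Sketch of the orientation check above, with made-up values: pyquaternion
    # uses (w, x, y, z) ordering, so for a desired identity orientation and an
    # observed 10-degree yaw,
    #   des_orn = pyquaternion.Quaternion(1., 0., 0., 0.)
    #   obs_orn = pyquaternion.Quaternion(axis=[0., 0., 1.], angle=numpy.radians(10))
    #   angle = numpy.abs(numpy.rad2deg((des_orn.inverse * obs_orn).angle))  # ~10.0
    # which is under the 15-degree threshold, so the orientation check passes.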
def validate_ros_plan(self, ros_resp, tgt_block):
""" Validates a ROS plan to move a block against the expected target block name """
if len(ros_resp.plan) == 0:
return True
else:
plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == "pick"]
if len(plan_blocks) > 0:
plan_block = plan_blocks[0]
else:
return False
print(f"Received plan to move {plan_block} and expected to move {tgt_block}")
return (tgt_block.readableName == plan_block)
def robot_state_callback(self, msg):
""" Processes robot state errors and raises execution failures for planning """
cur_time = time.time()
if (cur_time - self.arm_last_error_time) < self.arm_error_check_time:
return
self.arm_last_error_time = cur_time
cur_errors = msg.current_errors
# if cur_errors.cartesian_reflex:
# reason = "Cartesian reflex error detected!"
# raise ExecutionFailure(reason=reason, fatal=False)
if cur_errors.communication_constraints_violation:
reason = "Communication constraints violation detected!"
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_position_limits_violation:
reason = "Joint position limits violation detected!"
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_motion_generator_position_limits_violation:
reason = "Joint motion generator position limits violation detected!"
raise ExecutionFailure(reason=reason, fatal=True)
def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):
""" Service callback function to plan and execute a tower from active learning script """
from stacking_ros.srv import PlanTowerResponse
from tamp.ros_utils import ros_to_tower
tower = ros_to_tower(ros_req.tower_info)
success, stable, num_stack_stable = self.simulate_tower(
tower, True, real=self.real, base_xy=base_xy)
resp = PlanTowerResponse()
resp.success = success
resp.stable = stable
resp.num_stack_stable = num_stack_stable
return resp
def step_simulation(self, T, vis_frames=False, lifeTime=0.1):
p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)
p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)
q = self.robot.get_joint_positions()
for _ in range(T):
p.stepSimulation(physicsClientId=self._execution_client_id)
p.stepSimulation(physicsClientId=self._planning_client_id)
self.execute()
self.execution_robot.set_joint_positions(self.robot.joints, q)
self.plan()
self.robot.set_joint_positions(self.robot.joints, q)
time.sleep(1/2400.)
if vis_frames:
length = 0.1
for pddl_block in self.pddl_blocks:
pos, quat = pddl_block.get_pose()
new_x = transformation([length, 0.0, 0.0], pos, quat)
new_y = transformation([0.0, length, 0.0], pos, quat)
new_z = transformation([0.0, 0.0, length], pos, quat)
p.addUserDebugLine(pos, new_x, [1,0,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)
p.addUserDebugLine(pos, new_y, [0,1,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)
p.addUserDebugLine(pos, new_z, [0,0,1], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)
def simulate_action(self, action, block_ix, T=50, vis_sim=False, vis_placement=False):
"""
        Perform the given action with the given block. An observation
should be returned in the reference frame of the platform.
:param action: Place action which describes the relative pose of the block to the platform surface.
        :param block_ix: Index of the block (in self.belief_blocks) to perform the action on.
:param T: How many timesteps to simulate the block falling for.
        :param vis_sim: Unused.
:return: (action, T, end_pose) End pose should be TODO: what frame?
TODO: Not sure if this method works at the moment...
"""
assert(self.platform_table is not None)
real_block = self.belief_blocks[block_ix]
pddl_block = self.pddl_blocks[block_ix]
original_pose = pddl_block.get_base_link_pose()
# Set up the PDDLStream problem for the placing the given block on the
# platform with the specified action.
self.pddl_info = get_pddlstream_info(self.robot,
self.fixed,
self.pddl_blocks,
add_slanted_grasps=False,
approach_frame='gripper',
use_vision=self.use_vision)
init = self._get_initial_pddl_state()
# Figure out the correct transformation matrix based on the action.
real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))
rotated_block = get_rotated_block(real_block)
x = action.pos[0]
y = action.pos[1]
z = self.platform_table.get_dimensions()[2]/2. + rotated_block.dimensions[2]/2 #+ 1e-5
tform = numpy.array([[1., 0., 0., x],
[0., 1., 0., y],
[0., 0., 1., z],
[0., 0., 0., 1.]])
tform[0:3, 0:3] = action.rot.as_matrix()
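        # Sketch with made-up numbers: for action.pos = (0.02, -0.01) and an
        # identity rotation, tform is
        #   [[1., 0., 0.,  0.02],
        #    [0., 1., 0., -0.01],
        #    [0., 0., 1.,  z   ],
        #    [0., 0., 0.,  1.  ]]
        # i.e. the block's pose relative to the platform surface, with z chosen
        # so the rotated block rests flush on top of the platform.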
# Code to visualize where the block will be placed.
if vis_placement:
surface_tform = pb_robot.geometry.tform_from_pose(self.platform_table.get_base_link_pose())
body_tform = surface_tform@tform
length, lifeTime = 0.2, 0.0
pos, quat = pb_robot.geometry.pose_from_tform(body_tform)
new_x = transformation([length, 0.0, 0.0], pos, quat)
new_y = transformation([0.0, length, 0.0], pos, quat)
new_z = transformation([0.0, 0.0, length], pos, quat)
p.addUserDebugLine(pos, new_x, [1,0,0], lifeTime=lifeTime)
p.addUserDebugLine(pos, new_y, [0,1,0], lifeTime=lifeTime)
p.addUserDebugLine(pos, new_z, [0,0,1], lifeTime=lifeTime)
init += [('RelPose', pddl_block, self.platform_table, tform)]
goal = ('On', pddl_block, self.platform_table)
# Solve the PDDLStream problem.
print('Init:', init)
print('Goal:', goal)
self.plan_and_execute(init, goal, search_sample_ratio=1000)
# Execute the action.
# TODO: Check gravity compensation in the arm.
self.step_simulation(T)
end_pose = self._get_observed_pose(pddl_block, action)
observation = (action, T, end_pose)
self.step_simulation(500-T)
# Put block back in original position.
# TODO: Check if block is on the table or platform to start.
self.pddl_info = get_pddlstream_info(self.robot,
self.fixed,
self.pddl_blocks,
add_slanted_grasps=True,
approach_frame='gripper',
use_vision=self.use_vision)
init = self._get_initial_pddl_state()
goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)
init += [('Pose', pddl_block, goal_pose),
('Supported', pddl_block, goal_pose, self.table, self.table_pose)]
goal = ('and', ('AtPose', pddl_block, goal_pose),
('On', pddl_block, self.table))
# Solve the PDDLStream problem.
print('Init:', init)
print('Goal:', goal)
success = self.plan_and_execute(init, goal, max_time=100., search_sample_ratio=1000)
return observation
class PandaClientAgent:
"""
Lightweight client to call a PandaAgent as a service for active learning
"""
def __init__(self):
import rospy
rospy.init_node("panda_client")
self.restart_services()
def restart_services(self):
import rospy
from stacking_ros.srv import PlanTower
print("Waiting for Panda Agent server...")
rospy.wait_for_service("/plan_tower")
print("Done")
self.client = rospy.ServiceProxy(
"/plan_tower", PlanTower)
def simulate_tower(self, tower, vis, real=False):
"""
Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.
Returns:
success : Flag indicating success of execution (True/False)
            stable : Flag indicating tower stability (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
from stacking_ros.srv import PlanTowerRequest
from tamp.ros_utils import tower_to_ros, ros_to_tower
request = PlanTowerRequest()
request.tower_info = tower_to_ros(tower)
if vis:
w = World(tower)
env = Environment([w], vis_sim=True, vis_frames=True)
env.step(vis_frames=True)
for b in tower:
print('----- Block info -----')
print(b.name)
print(b.dimensions)
print(b.pose)
print(b.rotation)
response = self.client.call(request)
if vis:
env.disconnect()
return response.success, response.stable, response.num_stack_stable
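# Usage sketch (hypothetical driver; `blocks` and `tower` are assumed to be
# Block objects of the kind get_adversarial_blocks() above produces):
#
#   blocks = get_adversarial_blocks()
#   agent = PandaAgent(blocks, use_vision=False, real=False)
#   success, stable, n_stacked = agent.simulate_tower(tower, vis=True, real=False)
#
# or, against a running /plan_tower service:
#
#   client = PandaClientAgent()
#   success, stable, n_stacked = client.simulate_tower(tower, vis=False)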
|
normal
|
{
"blob_id": "5c1465bc70010ecabc156a04ec9877bbf66a229d",
"index": 5150,
"step-1": "<mask token>\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if 
self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n <mask token>\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. 
We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n <mask token>\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = 
pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, 
fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n print(\n f'Got recoverable failure. Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server 
side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? 
[y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n <mask token>\n <mask token>\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = 
pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n 
search_sample_ratio=1000)\n        return observation\n\n\nclass PandaClientAgent:\n    \"\"\"\n    Lightweight client to call a PandaAgent as a service for active learning\n    \"\"\"\n\n    def __init__(self):\n        import rospy\n        rospy.init_node('panda_client')\n        self.restart_services()\n\n    def restart_services(self):\n        import rospy\n        from stacking_ros.srv import PlanTower\n        print('Waiting for Panda Agent server...')\n        rospy.wait_for_service('/plan_tower')\n        print('Done')\n        self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n    def simulate_tower(self, tower, vis, real=False):\n        \"\"\"\n        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n        Returns:\n            success : Flag indicating success of execution (True/False)\n            stable : Flag indicating tower stability (0 or 1)\n            num_stack_stable : Number of blocks successfully stacked\n        \"\"\"\n        from stacking_ros.srv import PlanTowerRequest\n        from tamp.ros_utils import tower_to_ros, ros_to_tower\n        request = PlanTowerRequest()\n        request.tower_info = tower_to_ros(tower)\n        if vis:\n            w = World(tower)\n            env = Environment([w], vis_sim=True, vis_frames=True)\n            env.step(vis_frames=True)\n        for b in tower:\n            print('----- Block info -----')\n            print(b.name)\n            print(b.dimensions)\n            print(b.pose)\n            print(b.rotation)\n        response = self.client.call(request)\n        if vis:\n            env.disconnect()\n        return response.success, response.stable, response.num_stack_stable\n",
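The check_stability method in the record above reduces to two thresholded pose comparisons: the detected block position must lie within 0.04 m of the desired position, and the relative rotation between the desired and detected orientations must be under 15 degrees. The following is a minimal, self-contained sketch of that test under the same thresholds; is_pose_stable and the example poses are illustrative, not part of the agent's API.

import numpy
import pyquaternion

def is_pose_stable(desired_pose, observed_pose, pos_tol=0.04, ang_tol_deg=15.0):
    """Poses are ((x, y, z), (qx, qy, qz, qw)) tuples, as stored by the agent."""
    des_pos, des_orn = desired_pose
    obs_pos, obs_orn = observed_pose
    # Position test: Euclidean distance between desired and detected positions.
    dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.array(des_pos))
    if dist > pos_tol:
        return False
    # Orientation test: angle of the relative rotation between the two poses.
    # pyquaternion takes (w, x, y, z) while the stored poses use (x, y, z, w).
    des_q = pyquaternion.Quaternion(des_orn[3], des_orn[0], des_orn[1], des_orn[2])
    obs_q = pyquaternion.Quaternion(obs_orn[3], obs_orn[0], obs_orn[1], obs_orn[2])
    angle = numpy.abs(numpy.rad2deg((des_q.inverse * obs_q).angle))
    return angle <= ang_tol_deg

# A block that slid 1 cm and yawed 5 degrees still passes both thresholds.
tilt = pyquaternion.Quaternion(axis=[0, 0, 1], degrees=5.0)
desired = ((0.5, 0.0, 0.1), (0.0, 0.0, 0.0, 1.0))
observed = ((0.51, 0.0, 0.1), (tilt.x, tilt.y, tilt.z, tilt.w))
print(is_pose_stable(desired, observed))  # True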
"step-2": "<mask token>\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if 
self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print('Resetting world')\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print('Done')\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. 
We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n <mask token>\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = 
pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, 
fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n print(\n f'Got recoverable failure. Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server 
side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? 
[y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n <mask token>\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(tower, True,\n real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, 
physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = 
get_pddlstream_info(self.robot, self.fixed, self.\n            pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n            use_vision=self.use_vision)\n        init = self._get_initial_pddl_state()\n        goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n        init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n            goal_pose, self.table, self.table_pose)]\n        goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n            self.table)\n        print('Init:', init)\n        print('Goal:', goal)\n        success = self.plan_and_execute(init, goal, max_time=100.0,\n            search_sample_ratio=1000)\n        return observation\n\n\nclass PandaClientAgent:\n    \"\"\"\n    Lightweight client to call a PandaAgent as a service for active learning\n    \"\"\"\n\n    def __init__(self):\n        import rospy\n        rospy.init_node('panda_client')\n        self.restart_services()\n\n    def restart_services(self):\n        import rospy\n        from stacking_ros.srv import PlanTower\n        print('Waiting for Panda Agent server...')\n        rospy.wait_for_service('/plan_tower')\n        print('Done')\n        self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n    def simulate_tower(self, tower, vis, real=False):\n        \"\"\"\n        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n        Returns:\n            success : Flag indicating success of execution (True/False)\n            stable : Flag indicating tower stability (0 or 1)\n            num_stack_stable : Number of blocks successfully stacked\n        \"\"\"\n        from stacking_ros.srv import PlanTowerRequest\n        from tamp.ros_utils import tower_to_ros, ros_to_tower\n        request = PlanTowerRequest()\n        request.tower_info = tower_to_ros(tower)\n        if vis:\n            w = World(tower)\n            env = Environment([w], vis_sim=True, vis_frames=True)\n            env.step(vis_frames=True)\n        for b in tower:\n            print('----- Block info -----')\n            print(b.name)\n            print(b.dimensions)\n            print(b.pose)\n            print(b.rotation)\n        response = self.client.call(request)\n        if vis:\n            env.disconnect()\n        return response.success, response.stable, response.num_stack_stable\n",
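build_planning_problem in the records above expresses each stacked block's goal pose relative to the block beneath it, computing rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform and handing it to the planner as a ('RelPose', top, bottom, rel_tform) fact. Below is a minimal numpy sketch of that computation; tform_from_pose here is a hand-rolled stand-in for pb_robot.geometry.tform_from_pose, and the block poses are made up for illustration.

import numpy

def tform_from_pose(pos, quat_xyzw):
    """4x4 homogeneous transform from a position and unit (x, y, z, w) quaternion."""
    x, y, z, w = quat_xyzw
    # Standard quaternion-to-rotation-matrix conversion.
    rot = numpy.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w)],
        [2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y)],
    ])
    tform = numpy.eye(4)
    tform[:3, :3] = rot
    tform[:3, 3] = pos
    return tform

# Bottom block centered 3 cm above the table at x=0.4; top block resting 6 cm higher.
bottom_tform = tform_from_pose((0.4, 0.0, 0.03), (0.0, 0.0, 0.0, 1.0))
top_tform = tform_from_pose((0.4, 0.0, 0.09), (0.0, 0.0, 0.0, 1.0))
# The relative transform is invariant to where the pair sits in the world,
# which is what lets the planner place the whole stack at a new base pose.
rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform
print(rel_tform[:3, 3])  # -> [0.   0.   0.06]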
"step-3": "<mask token>\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if 
self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print('Resetting world')\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print('Done')\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. 
We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n\n def _update_block_poses(self, find_moved=False):\n \"\"\" Use the global world cameras to update the positions of the blocks \"\"\"\n try:\n resp = self._get_block_poses_world()\n named_poses = resp.poses\n except:\n import sys\n print('Service call to get block poses failed. Exiting.')\n sys.exit()\n n_found = 0\n for pddl_block_name, pddl_block in self.pddl_block_lookup.items():\n for named_pose in named_poses:\n if named_pose.block_id == pddl_block_name.split('_')[-1]:\n pose = named_pose.pose.pose\n if pose.position.x < 0.05:\n continue\n n_found += 1\n position = (pose.position.x, pose.position.y, pose.\n position.z)\n orientation = (pose.orientation.x, pose.orientation.y,\n pose.orientation.z, pose.orientation.w)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n if not self.use_planning_server:\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n if find_moved and n_found != len(self.moved_blocks):\n input(\n 'Could not find all the moved blocks. 
Please reposition blocks outside of the camera view and hit enter to continue.'\n )\n self._update_block_poses(find_moved=True)\n return\n for _, pddl_block in self.pddl_block_lookup.items():\n if pb_robot.collisions.body_collision(pddl_block, self.table):\n print('Collision with table and block:', pddl_block.\n readableName)\n position, orientation = pddl_block.get_base_link_pose()\n stable_z = pb_robot.placements.stable_z(pddl_block, self.table)\n position = position[0], position[1], stable_z\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=False)\n for ix in range(len(block_ixs)):\n bottom_block = self.pddl_blocks[block_ixs[ix]]\n for jx in range(ix + 1, len(block_ixs)):\n top_block = self.pddl_blocks[block_ixs[jx]]\n dist_moved = 0\n while pb_robot.collisions.body_collision(bottom_block,\n top_block):\n print('Collision with bottom %s and top %s:' % (\n bottom_block.readableName, top_block.readableName))\n position, orientation = top_block.get_base_link_pose()\n stable_z = position[2] + 0.001\n dist_moved += 0.001\n if self.real and dist_moved > 0.04:\n print(\n f'Found blocks {bottom_block} and {top_block} in collision'\n )\n input(\n 'Manually move the blocks and press Enter to continue'\n )\n self._update_block_poses(find_moved=False)\n return\n position = position[0], position[1], stable_z\n self.execute()\n top_block.set_base_link_pose((position, orientation))\n self.plan()\n top_block.set_base_link_pose((position, orientation))\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = 
self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n 
print(\n f'Got recoverable failure. Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n 
return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? [y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. 
Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n\n def robot_state_callback(self, msg):\n \"\"\" Processes robot state errors and raises execution failures for planning \"\"\"\n cur_time = time.time()\n if cur_time - self.arm_last_error_time < self.arm_error_check_time:\n return\n self.arm_last_error_time = cur_time\n cur_errors = msg.current_errors\n if cur_errors.communication_constraints_violation:\n reason = 'Communication constraints violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_position_limits_violation:\n reason = 'Joint position limits violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_motion_generator_position_limits_violation:\n reason = (\n 'Joint motion generator position limits violation detected!')\n raise ExecutionFailure(reason=reason, fatal=True)\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(tower, True,\n real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n 
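# Hold both robot instances at the saved configuration q so only the blocks settle while physics steps.\n 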
self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Unused.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, 
self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n search_sample_ratio=1000)\n return observation\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node('panda_client')\n self.restart_services()\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print('Waiting for Panda Agent server...')\n rospy.wait_for_service('/plan_tower')\n print('Done')\n self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n if vis:\n env.disconnect()\n return response.success, response.stable, response.num_stack_stable\n",
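The stacking goals built in build_planning_problem above reduce to one transform identity: the top block's pose expressed in the bottom block's frame is inv(T_bottom) @ T_top, stored as rel_tform. A minimal numpy sketch of that computation, with made-up translations standing in for the repo's pb_robot.geometry.tform_from_pose:

import numpy

def tform_from_pos(pos):
    # Homogeneous transform with identity rotation and the given translation.
    T = numpy.eye(4)
    T[0:3, 3] = pos
    return T

T_bottom = tform_from_pos([0.0, 0.5, 0.05])  # bottom block centered 5 cm up
T_top = tform_from_pos([0.0, 0.5, 0.15])     # top block stacked 10 cm higher
rel_tform = numpy.linalg.inv(T_bottom) @ T_top
print(rel_tform[0:3, 3])                     # [0. 0. 0.1], the stacking offset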
"step-4": "import sys\nimport time\nimport numpy\nimport pb_robot\nimport pyquaternion\nimport pybullet as p\nfrom copy import deepcopy\nfrom actions import PlaceAction, make_platform_world\nfrom block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, Quaternion, get_rotated_block, Pose, add_noise, Environment, Position, World\nfrom pddlstream.utils import INF\nfrom pybullet_utils import transformation\nimport tamp.primitives\nfrom tamp.misc import setup_panda_world, get_pddl_block_lookup, print_planning_problem, ExecuteActions, ExecutionFailure\nfrom tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n 
self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print('Resetting world')\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print('Done')\n\n def _get_initial_pddl_state(self):\n \"\"\"\n 
Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n\n def _update_block_poses(self, find_moved=False):\n \"\"\" Use the global world cameras to update the positions of the blocks \"\"\"\n try:\n resp = self._get_block_poses_world()\n named_poses = resp.poses\n except:\n import sys\n print('Service call to get block poses failed. Exiting.')\n sys.exit()\n n_found = 0\n for pddl_block_name, pddl_block in self.pddl_block_lookup.items():\n for named_pose in named_poses:\n if named_pose.block_id == pddl_block_name.split('_')[-1]:\n pose = named_pose.pose.pose\n if pose.position.x < 0.05:\n continue\n n_found += 1\n position = (pose.position.x, pose.position.y, pose.\n position.z)\n orientation = (pose.orientation.x, pose.orientation.y,\n pose.orientation.z, pose.orientation.w)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n if not self.use_planning_server:\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n if find_moved and n_found != len(self.moved_blocks):\n input(\n 'Could not find all the moved blocks. 
Please reposition blocks outside of the camera view and hit enter to continue.'\n )\n self._update_block_poses(find_moved=True)\n return\n for _, pddl_block in self.pddl_block_lookup.items():\n if pb_robot.collisions.body_collision(pddl_block, self.table):\n print('Collision with table and block:', pddl_block.\n readableName)\n position, orientation = pddl_block.get_base_link_pose()\n stable_z = pb_robot.placements.stable_z(pddl_block, self.table)\n position = position[0], position[1], stable_z\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=False)\n for ix in range(len(block_ixs)):\n bottom_block = self.pddl_blocks[block_ixs[ix]]\n for jx in range(ix + 1, len(block_ixs)):\n top_block = self.pddl_blocks[block_ixs[jx]]\n dist_moved = 0\n while pb_robot.collisions.body_collision(bottom_block,\n top_block):\n print('Collision with bottom %s and top %s:' % (\n bottom_block.readableName, top_block.readableName))\n position, orientation = top_block.get_base_link_pose()\n stable_z = position[2] + 0.001\n dist_moved += 0.001\n if self.real and dist_moved > 0.04:\n print(\n f'Found blocks {bottom_block} and {top_block} in collision'\n )\n input(\n 'Manually move the blocks and press Enter to continue'\n )\n self._update_block_poses(find_moved=False)\n return\n position = position[0], position[1], stable_z\n self.execute()\n top_block.set_base_link_pose((position, orientation))\n self.plan()\n top_block.set_base_link_pose((position, orientation))\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = 
self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n 
print(\n f'Got recoverable failure. Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros, transform_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n Args:\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n 
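# Recoverable failure: hand control back to simulate_tower, which rebuilds the request and retries.\n 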
return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? [y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. 
Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n\n def robot_state_callback(self, msg):\n \"\"\" Processes robot state errors and raises execution failures for planning \"\"\"\n cur_time = time.time()\n if cur_time - self.arm_last_error_time < self.arm_error_check_time:\n return\n self.arm_last_error_time = cur_time\n cur_errors = msg.current_errors\n if cur_errors.communication_constraints_violation:\n reason = 'Communication constraints violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_position_limits_violation:\n reason = 'Joint position limits violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_motion_generator_position_limits_violation:\n reason = (\n 'Joint motion generator position limits violation detected!')\n raise ExecutionFailure(reason=reason, fatal=True)\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(tower, True,\n real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n 
self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Unused.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, 
self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n search_sample_ratio=1000)\n return observation\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node('panda_client')\n self.restart_services()\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print('Waiting for Panda Agent server...')\n rospy.wait_for_service('/plan_tower')\n print('Done')\n self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n if vis:\n env.disconnect()\n return response.success, response.stable, response.num_stack_stable\n",
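For orientation, this is roughly how the PandaAgent above might be driven end to end. A hedged usage sketch: the zero-argument get_adversarial_blocks() call and the flag values are illustrative assumptions; only the import and the method signatures are attested in the code above.

from block_utils import get_adversarial_blocks

blocks = get_adversarial_blocks()  # assumed call; its signature is not shown in this file
agent = PandaAgent(blocks, use_vision=False, real=False)
# simulate_tower takes the tower blocks ordered bottom-to-top and returns
# (success, stable, num_stack_success), per its docstring.
success, stable, num_stacked = agent.simulate_tower(blocks, vis=True, T=2500)
agent.reset_world()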
"step-5": "import sys\nimport time\nimport numpy\nimport pb_robot\nimport pyquaternion\nimport pybullet as p\nfrom copy import deepcopy\n\nfrom actions import PlaceAction, make_platform_world\nfrom block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, \\\n Quaternion, get_rotated_block, Pose, add_noise, \\\n Environment, Position, World\nfrom pddlstream.utils import INF\nfrom pybullet_utils import transformation\nimport tamp.primitives\nfrom tamp.misc import setup_panda_world, get_pddl_block_lookup, \\\n print_planning_problem, ExecuteActions, ExecutionFailure\nfrom tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan\n\n\nclass PandaAgent:\n def __init__(self, blocks, noise=0.00005, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False, \n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n\n # Setup PyBullet instance to run in the background and handle planning/collision checking.\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n self.pddl_blocks, self.platform_table, self.platform_leg, self.table, self.frame, self.wall = setup_panda_world(self.robot,\n blocks,\n block_init_xy_poses,\n use_platform=use_platform)\n self.fixed = [self.platform_table, self.platform_leg, self.table, self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks)\n\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n\n # Setup PyBullet instance that only visualizes plan execution. 
State needs to match the planning instance.\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform=use_platform)\n\n # Set up ROS plumbing if using features that require it\n if self.use_vision or self.use_planning_server or self.use_learning_server or real:\n import rospy\n try:\n rospy.init_node(\"panda_agent\")\n except:\n print(\"ROS Node already created\")\n\n # Create an arm interface\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n\n from franka_core_msgs.msg import RobotState\n state_topic = \"/franka_ros_interface/custom_franka_state_controller/robot_state\"\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(\n state_topic, RobotState, self.robot_state_callback)\n\n # Set initial poses of all blocks and setup vision ROS services.\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy('get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy('get_block_poses_wrist', GetBlockPosesWrist)\n\n # Start ROS clients and servers as needed\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n\n print(\"Waiting for planning server...\")\n rospy.wait_for_service(\"get_latest_plan\")\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy(\n \"/reset_planning\", SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy(\n \"/get_latest_plan\", GetPlan)\n print(\"Done!\")\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service(\n \"/plan_tower\", PlanTower, self.learning_server_callback)\n print(\"Learning server started!\")\n\n self.pddl_info = get_pddlstream_info(self.robot,\n self.fixed,\n self.pddl_blocks,\n add_slanted_grasps=False,\n approach_frame='global',\n use_vision=self.use_vision)\n\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75), size=2)\n self.plan()\n\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n 
pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print(\"Resetting world\")\n\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print(\"Done\")\n\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',),\n ('Conf', conf),\n ('StartConf', conf),\n ('AtConf', conf),\n ('HandEmpty',)]\n\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), \n ('AtPose', self.table, self.table_pose)]\n\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body),\n ('Pose', body, pose),\n ('AtPose', body, pose),\n ('Block', body),\n ('On', body, self.table),\n ('Supported', body, pose, self.table, self.table_pose)]\n\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table, self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), \n ('AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. 
The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2,3] -= self.platform_leg.get_dimensions()[2]/2.\n\n rel_transform = numpy.linalg.inv(platform_transform)@block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n # TODO: Add noise to the observation.\n\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise*numpy.eye(3))\n\n return end_pose\n\n\n def _update_block_poses(self, find_moved=False):\n \"\"\" Use the global world cameras to update the positions of the blocks \"\"\"\n try:\n resp = self._get_block_poses_world()\n named_poses = resp.poses\n except:\n import sys\n print('Service call to get block poses failed. Exiting.')\n sys.exit()\n\n n_found = 0\n for pddl_block_name, pddl_block in self.pddl_block_lookup.items():\n for named_pose in named_poses:\n if named_pose.block_id == pddl_block_name.split('_')[-1]:\n pose = named_pose.pose.pose\n # Skip changing the pose of objects in storage.\n if pose.position.x < 0.05:\n continue\n n_found += 1\n position = (pose.position.x, pose.position.y, pose.position.z)\n orientation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n if not self.use_planning_server:\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n\n if find_moved and n_found != len(self.moved_blocks):\n input('Could not find all the moved blocks. Please reposition blocks outside of the camera view and hit enter to continue.')\n self._update_block_poses(find_moved=True)\n return\n\n # After loading from vision, objects may be in collision. 
Resolve this.\n for _, pddl_block in self.pddl_block_lookup.items():\n if pb_robot.collisions.body_collision(pddl_block, self.table):\n print('Collision with table and block:', pddl_block.readableName)\n position, orientation = pddl_block.get_base_link_pose()\n stable_z = pb_robot.placements.stable_z(pddl_block, self.table)\n position = (position[0], position[1], stable_z)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n\n # Resolve from low to high blocks.\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=False)\n for ix in range(len(block_ixs)):\n bottom_block = self.pddl_blocks[block_ixs[ix]]\n for jx in range(ix+1, len(block_ixs)):\n top_block = self.pddl_blocks[block_ixs[jx]]\n\n dist_moved = 0\n while pb_robot.collisions.body_collision(bottom_block, top_block):\n print('Collision with bottom %s and top %s:' % (bottom_block.readableName, top_block.readableName))\n position, orientation = top_block.get_base_link_pose()\n stable_z = position[2] + 0.001\n dist_moved += 0.001\n if self.real and dist_moved > 0.04:\n print(f\"Found blocks {bottom_block} and {top_block} in collision\")\n input(\"Manually move the blocks and press Enter to continue\")\n self._update_block_poses(find_moved=False)\n return\n position = (position[0], position[1], stable_z)\n self.execute()\n top_block.set_base_link_pose((position, orientation))\n self.plan()\n top_block.set_base_link_pose((position, orientation))\n\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n # Set up the list of original poses and order of blocks in the tower\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n\n # Build the initial data structures\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n # Initial poses and robot configuration\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n\n # Base block goal pose\n # TODO: Set base block to be rotated in its current position.\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = (base_xy[0], base_xy[1], tower[0].pose.pos.z)\n base_pose = (base_pos, tower[0].rotation)\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[0].rotation)))\n\n # Other block goal poses\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix-1]\n bottom_pose = (bottom_block.pose.pos, bottom_block.rotation)\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = (top_block.pose.pos, top_block.rotation)\n top_tform = 
pb_robot.geometry.tform_from_pose(top_pose)\n\n rel_tform = numpy.linalg.inv(bottom_tform)@top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n \n # Finally, tack on the tower resetting steps\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n\n # Return the planning data structure\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n\n print(\"Resetting blocks...\")\n print(\"Moved Blocks:\", self.moved_blocks)\n \n # Define block order by sorting by height\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=True)\n \n # Build the initial data structures\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n\n # Add all blocks to be moved to the data structure\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n\n # Return the planning data structure\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0., 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() 
for b in self.pddl_blocks]\n planning_prob = self.build_planning_problem(tower, base_xy)\n\n # Execute the stacking plan\n success, stack_stable, reset_stable, num_success, fatal = \\\n self.plan_and_execute(planning_prob, real, T, stack=True, ignore_resets=ignore_resets)\n print(f\"Completed tower stack with success: {success}, stable: {stack_stable}\")\n if reset_stable:\n print(f\"Completed tower reset stable: {reset_stable}\")\n\n # If we have a nonfatal failure, replan from new state, removing successful goals\n while (not success and not fatal):\n print(f\"Got recoverable failure. Replanning from step index {num_success}.\")\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n planning_prob.robot_config.angles = self.robot.arm.GetJointValues()\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = self.last_obj_held.body.readableName\n transform_to_ros(self.last_obj_held.grasp_objF, planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = \\\n self.plan_and_execute(planning_prob, real, T, stack=True, start_idx=num_success, ignore_resets=ignore_resets)\n print(f\"Completed tower stack with success: {success}, stable: {stack_stable}\")\n if reset_stable:\n print(f\"Completed tower reset stable: {reset_stable}\")\n\n # Write the number of successfully stacked blocks\n num_stack_success = min(len(tower), num_success)\n\n # If the full tower did not succeed, reset the moved blocks\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n # TODO: Return arm to home position to help with vision.\n \n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f\"Resetting {len(self.moved_blocks)} blocks.\")\n reset_success, _, reset_stable, num_reset_success, reset_fatal = \\\n self.plan_and_execute(planning_prob, real, T, stack=False, start_idx=num_reset_success)\n\n except Exception as e:\n print(\"Planning/execution failed during tower reset.\")\n print(e)\n\n # Return the final planning state\n return success, stack_stable, num_stack_success\n\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n # Initialize variables\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n\n if self.use_planning_server:\n # Send a reset request to the planning server\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n 
self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n\n while num_success < num_steps:\n try:\n # PLANNING\n # If using planning server, request a plan from the server using ROS\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state[num_success].name]\n\n # Wait for a valid plan\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print(\"Getting a plan from server...\")\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print(\"Planning failed on server side.\")\n # If failure happened during stacking, it is a fatal failure\n if (ros_req.goal_state[num_success].stack):\n print(f\"Failed during stacking {query_block}\")\n fatal = True\n # If failure happened during resetting, prompt user to manually reset blocks\n else:\n print(f\"Failed during resetting {query_block}\")\n input(\"Manually reset the blocks and press Enter to continue\")\n if real:\n self._update_block_poses()\n fatal = False\n return False, stack_stable, reset_stable, num_success, fatal\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.execution_robot, self.pddl_block_lookup)\n\n # Otherwise, plan locally\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n \n # Unpack initial conditions\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps/2) and self.alternate_orientations:\n init += [(\"Reset\",)]\n goal_terms.append((\"AtHome\", blk))\n else:\n init += [('Pose', blk, blk_pose),\n ('Supported', blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n \n # Plan with PDDLStream\n pddl_info = get_pddlstream_info(self.robot,\n fixed_objs,\n self.pddl_blocks,\n add_slanted_grasps=True,\n approach_frame='global',\n use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal, \n search_sample_ratio=1.0, \n max_time=INF)\n if plan is None:\n print(\"\\nFailed to plan\\n\")\n fatal = False\n return False, stack_stable, reset_stable, num_success, fatal\n saved_world.restore()\n\n print(\"\\nGot plan:\")\n print(plan)\n\n # Once we have a plan, execute it\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False, prompt=False, obstacles=obstacles, \n sim_fatal_failure_prob=0.0, sim_recoverable_failure_prob=0.0)\n\n # Manage the moved blocks (add to the set when stacking, remove when unstacking)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n\n # Check stability\n if not real:\n self.step_simulation(T, vis_frames=False)\n #input('Press enter to check stability.')\n if stack:\n stable = 
self.check_stability(real, query_block, desired_pose)\n else:\n stable = True # Don't care about stability on reset\n\n if stable == 0.:\n prompt = input('Tower NOT stable. Is this true? [y: Unstable / n: Stable]')\n if prompt == 'n':\n stable = 1.\n #input('Continue?')\n\n # Manage the success status of the plan\n if stable == 0.:\n print(\"Unstable after execution!\")\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps/2:\n print(\"Completed tower stack!\")\n stack_stable = True\n stack = False\n if ignore_resets:\n return True, stack_stable, reset_stable, num_success, False\n elif num_success == num_steps:\n print(\"Completed tower reset!\")\n reset_stable = True\n return True, stack_stable, reset_stable, num_success, False\n\n except ExecutionFailure as e:\n print(\"Planning/execution failed.\")\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n # Get pose of blocks using wrist camera.\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print('Service call to get block poses failed during check stability. Exiting.')\n sys.exit()\n\n # Check if pose is close to desired_pose.\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1]:\n visible = True\n pose = named_pose.pose.pose\n\n des_pos = desired_pose[0]\n obs_pos = (pose.position.x, pose.position.y, pose.position.z)\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n # First check if the pose is too far away.\n dist = numpy.linalg.norm(numpy.array(obs_pos)-numpy.array(des_pos))\n print(f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.\n # Also check that the block is flat on the table.\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1], orn[2])\n angle = (des_orn.inverse*obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(f'[Check Stability] Orientation Distance (> 15): {angle}')\n if angle > 15:\n return 0.\n\n # If block isn't visible, return 0.\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.\n\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(desired_pose[0]))\n # print(f\"Distance is {dist}\")\n # print(f\"Block dimensions are {block_pddl.get_dimensions()}\")\n if dist > 0.01:\n print('Unstable!')\n return 0.\n return 1.\n\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == \"pick\"]\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(f\"Received plan to move {plan_block} and expected to move {tgt_block}\")\n return (tgt_block.readableName == plan_block)\n\n\n def robot_state_callback(self, msg):\n \"\"\" Processes robot state errors and raises execution failures for planning \"\"\"\n 
cur_time = time.time()\n if (cur_time - self.arm_last_error_time) < self.arm_error_check_time:\n return\n\n self.arm_last_error_time = cur_time\n cur_errors = msg.current_errors\n # if cur_errors.cartesian_reflex:\n # reason = \"Cartesian reflex error detected!\"\n # raise ExecutionFailure(reason=reason, fatal=False)\n if cur_errors.communication_constraints_violation:\n reason = \"Communication constraints violation detected!\"\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_position_limits_violation:\n reason = \"Joint position limits violation detected!\"\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_motion_generator_position_limits_violation:\n reason = \"Joint motion generator position limits violation detected!\"\n raise ExecutionFailure(reason=reason, fatal=True)\n\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(\n tower, True, real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n\n q = self.robot.get_joint_positions()\n\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n\n time.sleep(1/2400.)\n\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n\n p.addUserDebugLine(pos, new_x, [1,0,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)\n p.addUserDebugLine(pos, new_y, [0,1,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)\n p.addUserDebugLine(pos, new_z, [0,0,1], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)\n\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False, vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. 
An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert(self.platform_table is not None)\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n\n original_pose = pddl_block.get_base_link_pose()\n\n # Set up the PDDLStream problem for the placing the given block on the\n # platform with the specified action.\n self.pddl_info = get_pddlstream_info(self.robot,\n self.fixed,\n self.pddl_blocks,\n add_slanted_grasps=False,\n approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n\n # Figure out the correct transformation matrix based on the action.\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2]/2. + rotated_block.dimensions[2]/2 #+ 1e-5\n tform = numpy.array([[1., 0., 0., x],\n [0., 1., 0., y],\n [0., 0., 1., z],\n [0., 0., 0., 1.]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n\n # Code to visualize where the block will be placed.\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.platform_table.get_base_link_pose())\n body_tform = surface_tform@tform\n length, lifeTime = 0.2, 0.0\n\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n\n p.addUserDebugLine(pos, new_x, [1,0,0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0,1,0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0,0,1], lifeTime=lifeTime)\n\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = ('On', pddl_block, self.platform_table)\n\n # Solve the PDDLStream problem.\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n\n # Execute the action.\n # TODO: Check gravity compensation in the arm.\n\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = (action, T, end_pose)\n self.step_simulation(500-T)\n\n # Put block back in original position.\n\n # TODO: Check if block is on the table or platform to start.\n self.pddl_info = get_pddlstream_info(self.robot,\n self.fixed,\n self.pddl_blocks,\n add_slanted_grasps=True,\n approach_frame='gripper',\n use_vision=self.use_vision)\n\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose),\n ('Supported', pddl_block, goal_pose, self.table, self.table_pose)]\n goal = ('and', ('AtPose', pddl_block, goal_pose),\n ('On', pddl_block, self.table))\n\n # Solve the PDDLStream problem.\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100., search_sample_ratio=1000)\n return observation\n\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n 
rospy.init_node(\"panda_client\")\n self.restart_services()\n\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print(\"Waiting for Panda Agent server...\")\n rospy.wait_for_service(\"/plan_tower\")\n print(\"Done\")\n self.client = rospy.ServiceProxy(\n \"/plan_tower\", PlanTower)\n\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n\n if vis:\n env.disconnect()\n\n return response.success, response.stable, response.num_stack_stable\n",
"step-ids": [
20,
22,
24,
25,
26
]
}
|
[
20,
22,
24,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .models import CNNClassifier, load_weights, LastLayer_Alexnet, classes, MyResNet
from .transforms import image_transforms, tensor_transform
from .utils import newest_model, Dataset, load_data
|
flexible
|
{
"blob_id": "17781ae5e9c72232fbc11c7eda7daeaeb0fa3670",
"index": 9277,
"step-1": "<mask token>\n",
"step-2": "from .models import CNNClassifier, load_weights, LastLayer_Alexnet, classes, MyResNet\nfrom .transforms import image_transforms, tensor_transform\nfrom .utils import newest_model, Dataset, load_data\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream', f)
if cv2.waitKey(1) & 255 == ord('q'):
break
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream', f)
if cv2.waitKey(1) & 255 == ord('q'):
break
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import cv2
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream', f)
if cv2.waitKey(1) & 255 == ord('q'):
break
cv2.destroyAllWindows()
<|reserved_special_token_1|>
"""Access IP Camera in Python OpenCV"""
import cv2
#stream = cv2.VideoCapture('protocol://IP:port/1')
# Use the next line if your camera has a username and password
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream',f)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
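For reference, a slightly hardened sketch of the same loop follows. It is an illustrative variant rather than part of the original example: it checks the boolean that stream.read() returns before displaying (a dropped RTSP stream yields False and a None frame, which would crash cv2.imshow) and releases the capture handle on exit.

import cv2

stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
    r, f = stream.read()
    if not r:  # no frame: bad URL, failed auth, or the stream dropped
        print('Stream read failed; exiting.')
        break
    cv2.imshow('IP Camera stream', f)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
stream.release()  # free the capture socket
cv2.destroyAllWindows()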
|
flexible
|
{
"blob_id": "f9db3c96bc3fd4911640d0428672c87072564b0d",
"index": 710,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nstream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "<mask token>\nimport cv2\nstream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "\"\"\"Access IP Camera in Python OpenCV\"\"\"\r\n\r\nimport cv2\r\n\r\n#stream = cv2.VideoCapture('protocol://IP:port/1')\r\n\r\n# Use the next line if your camera has a username and password\r\nstream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video') \r\n\r\nwhile True:\r\n\r\n r, f = stream.read()\r\n cv2.imshow('IP Camera stream',f)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from sqlalchemy import or_
from ..extensions import db
from .models import User
def create_user(username):
user = User(username)
db.session.add(user)
return user
def get_user(user_id=None, **kwargs):
if user_id is not None:
return User.query.get(user_id)
username = kwargs.pop("username")
if username is not None:
return User.query.filter_by(username=username).first()
raise NotImplementedError
def get_user_like(query):
    # .like() is a column operator; calling a bare `like` inside or_() is a NameError.
    return User.query.filter(User.username.like('%' + query + '%')).limit(10).all()
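A minimal usage sketch of these helpers follows. It assumes an active application context with the Flask-SQLAlchemy db imported above, the User(username) constructor the module already relies on, and an auto-generated id primary key; the explicit commit is an assumption, since create_user only stages the new row.

user = create_user('alice')
db.session.commit()  # create_user only add()s the row; persist it here

print(get_user(user_id=user.id))   # lookup by primary key
print(get_user(username='alice'))  # lookup by username
print(get_user_like('ali'))        # up to 10 users whose name contains 'ali'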
|
normal
|
{
"blob_id": "49c15f89225bb1dd1010510fe28dba34f6a8d085",
"index": 4866,
"step-1": "<mask token>\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\ndef get_user_like(query):\n return User.query.filter(or_(User.username, like('%' + query + '%'))\n ).limit(10).all()\n",
"step-4": "from sqlalchemy import or_\nfrom ..extensions import db\nfrom .models import User\n\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\ndef get_user_like(query):\n return User.query.filter(or_(User.username, like('%' + query + '%'))\n ).limit(10).all()\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom sqlalchemy import or_\n\nfrom ..extensions import db \nfrom .models import User\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop(\"username\")\n if username is not None:\n return User.query.filter_by(username=username).first()\n\n raise NotImplementedError\n\ndef get_user_like(query):\n return User.query.filter(or_(User.username,like('%'+query+'%'))).limit(10).all()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def strictly_greater_than(value):
if value:
return 'Greater than 100'
elif value:
return 'Greater than 10'
else:
return '10 or less'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def strictly_greater_than(value):
if value:
return 'Greater than 100'
elif value:
return 'Greater than 10'
else:
return '10 or less'
print(strictly_greater_than(1))
<|reserved_special_token_1|>
def strictly_greater_than(value):
if value : # Change this line
return "Greater than 100"
elif value : # Change this line
return "Greater than 10"
else:
return "10 or less"
# Change the value 1 below to experiment with different values
print(strictly_greater_than(1))
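For reference, a completed version of the exercise might look like the sketch below. The 100 and 10 thresholds are taken from the returned strings; everything else is unchanged.

def strictly_greater_than(value):
    if value > 100:
        return "Greater than 100"
    elif value > 10:
        return "Greater than 10"
    else:
        return "10 or less"

print(strictly_greater_than(1))    # 10 or less
print(strictly_greater_than(50))   # Greater than 10
print(strictly_greater_than(101))  # Greater than 100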
|
flexible
|
{
"blob_id": "7620d76afc65ceb3b478f0b05339ace1f1531f7d",
"index": 6708,
"step-1": "<mask token>\n",
"step-2": "def strictly_greater_than(value):\n if value:\n return 'Greater than 100'\n elif value:\n return 'Greater than 10'\n else:\n return '10 or less'\n\n\n<mask token>\n",
"step-3": "def strictly_greater_than(value):\n if value:\n return 'Greater than 100'\n elif value:\n return 'Greater than 10'\n else:\n return '10 or less'\n\n\nprint(strictly_greater_than(1))\n",
"step-4": "def strictly_greater_than(value):\n if value : # Change this line\n return \"Greater than 100\"\n elif value : # Change this line\n return \"Greater than 10\"\n else:\n return \"10 or less\"\n\n# Change the value 1 below to experiment with different values\nprint(strictly_greater_than(1))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import tensorflow.keras
from PIL import Image, ImageOps
from os import listdir
from os.path import isfile, join
import numpy as np
import glob
import cv2
np.set_printoptions(suppress = True)
# Load the model
model = tensorflow.keras.models.load_model('./converted_keras/keras_model.h5')
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape = (1, 224, 224, 3), dtype = np.float32)
path = glob.glob("/Users/zjisuoo/Documents/zjisuoo_git/OurChord/00_NOTE_DATA/TEST/*.png")
images = []
for image in path :
n1 = cv2.imread(image)
    n2 = cv2.resize(n1, (224, 224))  # model input is 224x224 (244 was a typo)
images.append(n2)
print(image)
#turn the last image into a numpy array (note: only the final image in `path` is classified here)
image_array = np.array(n2)
# Normalize the image
normalized_image_array = (image_array.astype(dtype = np.float32) / 127.0) - 1
# Load the image into the array, keeping the (1, 224, 224, 3) batch shape
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
# print(prediction)
if(prediction[0][0] > 0.8):
    print("2분음표")   # half note
elif(prediction[0][1] > 0.8):
    print("4분음표")   # quarter note
elif(prediction[0][2] > 0.8):
    print("8분음표")   # eighth note
elif(prediction[0][3] > 0.8):
    print("16분음표")  # sixteenth note
else:
    print("음표아님")  # not a note
|
normal
|
{
"blob_id": "13b69ec61d6b2129f1974ce7cae91c84100b3b58",
"index": 449,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(suppress=True)\n<mask token>\nfor image in path:\n n1 = cv2.imread(image)\n n2 = cv2.resize(n1, (244, 244))\n images.append(n2)\n print(image)\n<mask token>\nif prediction[0][0] > 0.8:\n print('2분음표')\nelif prediction[0][1] > 0.8:\n print('4분음표')\nelif prediction[0][2] > 0.8:\n print('8분음표')\nelif prediction[0][3] > 0.8:\n print('16분음표')\nelse:\n print('음표아님')\n",
"step-3": "<mask token>\nnp.set_printoptions(suppress=True)\nmodel = tensorflow.keras.models.load_model('./converted_keras/keras_model.h5')\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\npath = glob.glob(\n '/Users/zjisuoo/Documents/zjisuoo_git/OurChord/00_NOTE_DATA/TEST/*.png')\nimages = []\nfor image in path:\n n1 = cv2.imread(image)\n n2 = cv2.resize(n1, (244, 244))\n images.append(n2)\n print(image)\nimage_array = np.array(n2)\nnormalized_image_array = image_array.astype(dtype=np.float32) / 127.0 - 1\ndata = normalized_image_array\nprediction = model.predict(data)\nif prediction[0][0] > 0.8:\n print('2분음표')\nelif prediction[0][1] > 0.8:\n print('4분음표')\nelif prediction[0][2] > 0.8:\n print('8분음표')\nelif prediction[0][3] > 0.8:\n print('16분음표')\nelse:\n print('음표아님')\n",
"step-4": "import tensorflow.keras\nfrom PIL import Image, ImageOps\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport glob\nimport cv2\nnp.set_printoptions(suppress=True)\nmodel = tensorflow.keras.models.load_model('./converted_keras/keras_model.h5')\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\npath = glob.glob(\n '/Users/zjisuoo/Documents/zjisuoo_git/OurChord/00_NOTE_DATA/TEST/*.png')\nimages = []\nfor image in path:\n n1 = cv2.imread(image)\n n2 = cv2.resize(n1, (244, 244))\n images.append(n2)\n print(image)\nimage_array = np.array(n2)\nnormalized_image_array = image_array.astype(dtype=np.float32) / 127.0 - 1\ndata = normalized_image_array\nprediction = model.predict(data)\nif prediction[0][0] > 0.8:\n print('2분음표')\nelif prediction[0][1] > 0.8:\n print('4분음표')\nelif prediction[0][2] > 0.8:\n print('8분음표')\nelif prediction[0][3] > 0.8:\n print('16분음표')\nelse:\n print('음표아님')\n",
"step-5": "import tensorflow.keras\nfrom PIL import Image, ImageOps\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport glob\nimport cv2\n\nnp.set_printoptions(suppress = True)\n\n# Load the model\nmodel = tensorflow.keras.models.load_model('./converted_keras/keras_model.h5')\n\n# Create the array of the right shape to feed into the keras model\n# The 'length' or number of images you can put into the array is\n# determined by the first position in the shape tuple, in this case 1.\ndata = np.ndarray(shape = (1, 224, 224, 3), dtype = np.float32)\n\npath = glob.glob(\"/Users/zjisuoo/Documents/zjisuoo_git/OurChord/00_NOTE_DATA/TEST/*.png\")\nimages = []\n\nfor image in path :\n n1 = cv2.imread(image)\n n2 = cv2.resize(n1, (244, 244))\n images.append(n2)\n\n print(image)\n\n#turn the image int a numpy array\nimage_array = np.array(n2)\n\n# Normalize the image\nnormalized_image_array = (image_array.astype(dtype = np.float32) / 127.0) - 1\n\n# Load the image into the array\ndata = normalized_image_array\n\n# run the inference\nprediction = model.predict(data)\n# print(prediction)\n\nif(prediction[0][0] > 0.8):\n print(\"2분음표\")\nelif(prediction[0][1] > 0.8):\n print(\"4분음표\")\nelif(prediction[0][2] > 0.8):\n print(\"8분음표\")\nelif(prediction[0][3] > 0.8):\n print(\"16분음표\")\nelse:\n print(\"음표아님\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
permissions = ('restart', )
commands = ('restart', )
def get_command(session, parsed_message):
return 'stop', 'restart'
def parse_response(permission, response):
return response
|
normal
|
{
"blob_id": "acd5cf675522c90fc9fbc96bdeb52f66835626b4",
"index": 3489,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_response(permission, response):\n return response\n",
"step-3": "<mask token>\n\n\ndef get_command(session, parsed_message):\n return 'stop', 'restart'\n\n\ndef parse_response(permission, response):\n return response\n",
"step-4": "permissions = 'restart',\ncommands = 'restart',\n\n\ndef get_command(session, parsed_message):\n return 'stop', 'restart'\n\n\ndef parse_response(permission, response):\n return response\n",
"step-5": "permissions = ('restart', )\ncommands = ('restart', )\n\n\ndef get_command(session, parsed_message):\n return 'stop', 'restart'\n\n\ndef parse_response(permission, response):\n return response\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# This module is used to load pascalvoc datasets (2007 or 2012)
import os
import tensorflow as tf
from configs.config_common import *
from configs.config_train import *
from configs.config_test import *
import sys
import random
import numpy as np
import xml.etree.ElementTree as ET
# Original dataset organisation.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# TFRecords convertion parameters.
RANDOM_SEED = 4242
SAMPLES_PER_FILES = 200
slim = tf.contrib.slim
class Dataset(object):
def __init__(self):
# Descriptions of the image items
self.items_descriptions = {
'image': 'A color image of varying height and width.',
'shape': 'Shape of the image',
'object/bbox': 'A list of bounding boxes, one per each object.',
'object/label': 'A list of labels, one per each object.',
}
# Features of Pascal VOC TFRecords.
self.features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
    # This function reads a dataset from tfrecords
# Inputs:
# datase_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
    # This function is used to load pascalvoc2007 or pascalvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
    # Convert PascalVOC to TF records
# Process a image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
        image_data = tf.gfile.FastGFile(filename, 'rb').read()  # JPEG bytes require binary mode
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
    def _get_output_filename_PascalVOC(self, output_dir, name, idx):
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
# Dataset filenames, and shuffling.
path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
filenames = sorted(os.listdir(path))
if shuffling:
random.seed(RANDOM_SEED)
random.shuffle(filenames)
# Process dataset files.
i = 0
fidx = 0
while i < len(filenames):
# Open new TFRecord file.
            tf_filename = self._get_output_filename_PascalVOC(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < len(filenames) and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
sys.stdout.flush()
filename = filenames[i]
img_name = filename[:-4]
self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
i += 1
j += 1
fidx += 1
print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def float_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Wrapper for inserting bytes features into Example proto.
def bytes_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
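A minimal usage sketch for this class is given below. The tfrecord path and split come from the comments above, the VOC source path is an assumption, FLAGS must come from the imported config modules, and under TF 1.x the provider tensors still need a session with queue runners started before they can be evaluated.

dataset_util = Dataset()
test_dataset = dataset_util.read_dataset_from_tfrecords(
    'pascalvoc_2007', 'test', './tfrecords_test/')
image, gt_bboxes, gt_labels, gt_difficult = \
    dataset_util.get_groundtruth_from_dataset(test_dataset, 'test')

# One-off conversion from a raw PascalVOC folder to tfrecords (assumed local path):
dataset_util.run_PascalVOC('./VOCdevkit/VOC2007/', './tfrecords_test/')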
|
normal
|
{
"blob_id": "c33d625ebd6a40551d2ce0393fd78619601ea7ae",
"index": 5834,
"step-1": "<mask token>\n\n\nclass Dataset(object):\n\n def __init__(self):\n self.items_descriptions = {'image':\n 'A color image of varying height and width.', 'shape':\n 'Shape of the image', 'object/bbox':\n 'A list of bounding boxes, one per each object.',\n 'object/label': 'A list of labels, one per each object.'}\n self.features = {'image/encoded': tf.FixedLenFeature((), tf.string,\n default_value=''), 'image/format': tf.FixedLenFeature((), tf.\n string, default_value='jpeg'), 'image/object/bbox/xmin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/label': tf.\n VarLenFeature(dtype=tf.int64), 'image/object/bbox/difficult':\n tf.VarLenFeature(dtype=tf.int64)}\n self.items = {'image': slim.tfexample_decoder.Image('image/encoded',\n 'image/format'), 'gt_bboxes': slim.tfexample_decoder.\n BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],\n 'image/object/bbox/'), 'gt_labels': slim.tfexample_decoder.\n Tensor('image/object/bbox/label'), 'difficult_objects': slim.\n tfexample_decoder.Tensor('image/object/bbox/difficult')}\n\n def read_dataset_from_tfrecords(self, dataset_name, train_or_test,\n dataset_path):\n with tf.name_scope(None, 'read_dataset_from_tfrecords') as scope:\n if (dataset_name == 'pascalvoc_2007' or dataset_name ==\n 'pascalvoc_2012'):\n dataset = self.load_dataset(dataset_name, train_or_test,\n dataset_path)\n return dataset\n\n def load_dataset(self, dataset_name, train_or_test, dataset_path):\n dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'\n if dataset_name == 'pascalvoc_2007':\n train_test_sizes = {'train': FLAGS.pascalvoc_2007_train_size,\n 'test': FLAGS.pascalvoc_2007_test_size}\n elif dataset_name == 'pascalvoc_2012':\n train_test_sizes = {'train': FLAGS.pascalvoc_2012_train_size}\n dataset_file_name = os.path.join(dataset_path, dataset_file_name %\n train_or_test)\n reader = tf.TFRecordReader\n decoder = slim.tfexample_decoder.TFExampleDecoder(self.features,\n self.items)\n return slim.dataset.Dataset(data_sources=dataset_file_name, reader=\n reader, decoder=decoder, num_samples=train_test_sizes[\n train_or_test], items_to_descriptions=self.items_descriptions,\n num_classes=FLAGS.num_classes - 1, labels_to_names=None)\n\n def get_groundtruth_from_dataset(self, dataset, train_or_test):\n with tf.name_scope(None, 'get_groundtruth_from_dataset') as scope:\n if train_or_test == 'test':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.test_num_readers,\n common_queue_capacity=FLAGS.test_common_queue_capacity,\n common_queue_min=FLAGS.test_batch_size, shuffle=FLAGS.\n test_shuffle)\n elif train_or_test == 'train':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.train_num_readers,\n common_queue_capacity=FLAGS.train_common_queue_capacity,\n common_queue_min=10 * FLAGS.train_batch_size, shuffle=\n FLAGS.train_shuffle)\n [image, gt_bboxes, gt_labels] = provider.get(['image',\n 'gt_bboxes', 'gt_labels'])\n gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64\n )\n if FLAGS.test_discard_difficult_objects:\n [gt_difficult_objects] = provider.get(['difficult_objects'])\n return [image, gt_bboxes, gt_labels, gt_difficult_objects]\n\n def _process_image_PascalVOC(self, directory, name):\n filename = directory + DIRECTORY_IMAGES + name + '.jpg'\n image_data = 
tf.gfile.FastGFile(filename, 'r').read()\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml'\n )\n tree = ET.parse(filename)\n root = tree.getroot()\n size = root.find('size')\n shape = [int(size.find('height').text), int(size.find('width').text\n ), int(size.find('depth').text)]\n bboxes = []\n labels = []\n labels_text = []\n difficult = []\n truncated = []\n for obj in root.findall('object'):\n label = obj.find('name').text\n labels.append(int(VOC_LABELS[label][0]))\n labels_text.append(label.encode('ascii'))\n if obj.find('difficult'):\n difficult.append(int(obj.find('difficult').text))\n else:\n difficult.append(0)\n if obj.find('truncated'):\n truncated.append(int(obj.find('truncated').text))\n else:\n truncated.append(0)\n bbox = obj.find('bndbox')\n bboxes.append((float(bbox.find('ymin').text) / shape[0], float(\n bbox.find('xmin').text) / shape[1], float(bbox.find('ymax')\n .text) / shape[0], float(bbox.find('xmax').text) / shape[1]))\n return (image_data, shape, bboxes, labels, labels_text, difficult,\n truncated)\n\n def _convert_to_example_PascalVOC(self, image_data, labels, labels_text,\n bboxes, shape, difficult, truncated):\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n for b in bboxes:\n assert len(b) == 4\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n image_format = b'JPEG'\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': self.int64_feature(shape[0]), 'image/width':\n self.int64_feature(shape[1]), 'image/channels': self.\n int64_feature(shape[2]), 'image/shape': self.int64_feature(\n shape), 'image/object/bbox/xmin': self.float_feature(xmin),\n 'image/object/bbox/xmax': self.float_feature(xmax),\n 'image/object/bbox/ymin': self.float_feature(ymin),\n 'image/object/bbox/ymax': self.float_feature(ymax),\n 'image/object/bbox/label': self.int64_feature(labels),\n 'image/object/bbox/label_text': self.bytes_feature(labels_text),\n 'image/object/bbox/difficult': self.int64_feature(difficult),\n 'image/object/bbox/truncated': self.int64_feature(truncated),\n 'image/format': self.bytes_feature(image_format),\n 'image/encoded': self.bytes_feature(image_data)}))\n return example\n <mask token>\n\n def _get_output_filename_PascalVOC(output_dir, name, idx):\n return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)\n <mask token>\n\n def int64_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n def float_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n def bytes_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n",
"step-2": "<mask token>\n\n\nclass Dataset(object):\n\n def __init__(self):\n self.items_descriptions = {'image':\n 'A color image of varying height and width.', 'shape':\n 'Shape of the image', 'object/bbox':\n 'A list of bounding boxes, one per each object.',\n 'object/label': 'A list of labels, one per each object.'}\n self.features = {'image/encoded': tf.FixedLenFeature((), tf.string,\n default_value=''), 'image/format': tf.FixedLenFeature((), tf.\n string, default_value='jpeg'), 'image/object/bbox/xmin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/label': tf.\n VarLenFeature(dtype=tf.int64), 'image/object/bbox/difficult':\n tf.VarLenFeature(dtype=tf.int64)}\n self.items = {'image': slim.tfexample_decoder.Image('image/encoded',\n 'image/format'), 'gt_bboxes': slim.tfexample_decoder.\n BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],\n 'image/object/bbox/'), 'gt_labels': slim.tfexample_decoder.\n Tensor('image/object/bbox/label'), 'difficult_objects': slim.\n tfexample_decoder.Tensor('image/object/bbox/difficult')}\n\n def read_dataset_from_tfrecords(self, dataset_name, train_or_test,\n dataset_path):\n with tf.name_scope(None, 'read_dataset_from_tfrecords') as scope:\n if (dataset_name == 'pascalvoc_2007' or dataset_name ==\n 'pascalvoc_2012'):\n dataset = self.load_dataset(dataset_name, train_or_test,\n dataset_path)\n return dataset\n\n def load_dataset(self, dataset_name, train_or_test, dataset_path):\n dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'\n if dataset_name == 'pascalvoc_2007':\n train_test_sizes = {'train': FLAGS.pascalvoc_2007_train_size,\n 'test': FLAGS.pascalvoc_2007_test_size}\n elif dataset_name == 'pascalvoc_2012':\n train_test_sizes = {'train': FLAGS.pascalvoc_2012_train_size}\n dataset_file_name = os.path.join(dataset_path, dataset_file_name %\n train_or_test)\n reader = tf.TFRecordReader\n decoder = slim.tfexample_decoder.TFExampleDecoder(self.features,\n self.items)\n return slim.dataset.Dataset(data_sources=dataset_file_name, reader=\n reader, decoder=decoder, num_samples=train_test_sizes[\n train_or_test], items_to_descriptions=self.items_descriptions,\n num_classes=FLAGS.num_classes - 1, labels_to_names=None)\n\n def get_groundtruth_from_dataset(self, dataset, train_or_test):\n with tf.name_scope(None, 'get_groundtruth_from_dataset') as scope:\n if train_or_test == 'test':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.test_num_readers,\n common_queue_capacity=FLAGS.test_common_queue_capacity,\n common_queue_min=FLAGS.test_batch_size, shuffle=FLAGS.\n test_shuffle)\n elif train_or_test == 'train':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.train_num_readers,\n common_queue_capacity=FLAGS.train_common_queue_capacity,\n common_queue_min=10 * FLAGS.train_batch_size, shuffle=\n FLAGS.train_shuffle)\n [image, gt_bboxes, gt_labels] = provider.get(['image',\n 'gt_bboxes', 'gt_labels'])\n gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64\n )\n if FLAGS.test_discard_difficult_objects:\n [gt_difficult_objects] = provider.get(['difficult_objects'])\n return [image, gt_bboxes, gt_labels, gt_difficult_objects]\n\n def _process_image_PascalVOC(self, directory, name):\n filename = directory + DIRECTORY_IMAGES + name + '.jpg'\n image_data = 
tf.gfile.FastGFile(filename, 'r').read()\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml'\n )\n tree = ET.parse(filename)\n root = tree.getroot()\n size = root.find('size')\n shape = [int(size.find('height').text), int(size.find('width').text\n ), int(size.find('depth').text)]\n bboxes = []\n labels = []\n labels_text = []\n difficult = []\n truncated = []\n for obj in root.findall('object'):\n label = obj.find('name').text\n labels.append(int(VOC_LABELS[label][0]))\n labels_text.append(label.encode('ascii'))\n if obj.find('difficult'):\n difficult.append(int(obj.find('difficult').text))\n else:\n difficult.append(0)\n if obj.find('truncated'):\n truncated.append(int(obj.find('truncated').text))\n else:\n truncated.append(0)\n bbox = obj.find('bndbox')\n bboxes.append((float(bbox.find('ymin').text) / shape[0], float(\n bbox.find('xmin').text) / shape[1], float(bbox.find('ymax')\n .text) / shape[0], float(bbox.find('xmax').text) / shape[1]))\n return (image_data, shape, bboxes, labels, labels_text, difficult,\n truncated)\n\n def _convert_to_example_PascalVOC(self, image_data, labels, labels_text,\n bboxes, shape, difficult, truncated):\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n for b in bboxes:\n assert len(b) == 4\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n image_format = b'JPEG'\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': self.int64_feature(shape[0]), 'image/width':\n self.int64_feature(shape[1]), 'image/channels': self.\n int64_feature(shape[2]), 'image/shape': self.int64_feature(\n shape), 'image/object/bbox/xmin': self.float_feature(xmin),\n 'image/object/bbox/xmax': self.float_feature(xmax),\n 'image/object/bbox/ymin': self.float_feature(ymin),\n 'image/object/bbox/ymax': self.float_feature(ymax),\n 'image/object/bbox/label': self.int64_feature(labels),\n 'image/object/bbox/label_text': self.bytes_feature(labels_text),\n 'image/object/bbox/difficult': self.int64_feature(difficult),\n 'image/object/bbox/truncated': self.int64_feature(truncated),\n 'image/format': self.bytes_feature(image_format),\n 'image/encoded': self.bytes_feature(image_data)}))\n return example\n\n def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):\n (image_data, shape, bboxes, labels, labels_text, difficult, truncated\n ) = self._process_image_PascalVOC(dataset_dir, name)\n example = self._convert_to_example_PascalVOC(image_data, labels,\n labels_text, bboxes, shape, difficult, truncated)\n tfrecord_writer.write(example.SerializeToString())\n\n def _get_output_filename_PascalVOC(output_dir, name, idx):\n return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)\n <mask token>\n\n def int64_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n def float_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n def bytes_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n",
"step-3": "<mask token>\n\n\nclass Dataset(object):\n\n def __init__(self):\n self.items_descriptions = {'image':\n 'A color image of varying height and width.', 'shape':\n 'Shape of the image', 'object/bbox':\n 'A list of bounding boxes, one per each object.',\n 'object/label': 'A list of labels, one per each object.'}\n self.features = {'image/encoded': tf.FixedLenFeature((), tf.string,\n default_value=''), 'image/format': tf.FixedLenFeature((), tf.\n string, default_value='jpeg'), 'image/object/bbox/xmin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/label': tf.\n VarLenFeature(dtype=tf.int64), 'image/object/bbox/difficult':\n tf.VarLenFeature(dtype=tf.int64)}\n self.items = {'image': slim.tfexample_decoder.Image('image/encoded',\n 'image/format'), 'gt_bboxes': slim.tfexample_decoder.\n BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],\n 'image/object/bbox/'), 'gt_labels': slim.tfexample_decoder.\n Tensor('image/object/bbox/label'), 'difficult_objects': slim.\n tfexample_decoder.Tensor('image/object/bbox/difficult')}\n\n def read_dataset_from_tfrecords(self, dataset_name, train_or_test,\n dataset_path):\n with tf.name_scope(None, 'read_dataset_from_tfrecords') as scope:\n if (dataset_name == 'pascalvoc_2007' or dataset_name ==\n 'pascalvoc_2012'):\n dataset = self.load_dataset(dataset_name, train_or_test,\n dataset_path)\n return dataset\n\n def load_dataset(self, dataset_name, train_or_test, dataset_path):\n dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'\n if dataset_name == 'pascalvoc_2007':\n train_test_sizes = {'train': FLAGS.pascalvoc_2007_train_size,\n 'test': FLAGS.pascalvoc_2007_test_size}\n elif dataset_name == 'pascalvoc_2012':\n train_test_sizes = {'train': FLAGS.pascalvoc_2012_train_size}\n dataset_file_name = os.path.join(dataset_path, dataset_file_name %\n train_or_test)\n reader = tf.TFRecordReader\n decoder = slim.tfexample_decoder.TFExampleDecoder(self.features,\n self.items)\n return slim.dataset.Dataset(data_sources=dataset_file_name, reader=\n reader, decoder=decoder, num_samples=train_test_sizes[\n train_or_test], items_to_descriptions=self.items_descriptions,\n num_classes=FLAGS.num_classes - 1, labels_to_names=None)\n\n def get_groundtruth_from_dataset(self, dataset, train_or_test):\n with tf.name_scope(None, 'get_groundtruth_from_dataset') as scope:\n if train_or_test == 'test':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.test_num_readers,\n common_queue_capacity=FLAGS.test_common_queue_capacity,\n common_queue_min=FLAGS.test_batch_size, shuffle=FLAGS.\n test_shuffle)\n elif train_or_test == 'train':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.train_num_readers,\n common_queue_capacity=FLAGS.train_common_queue_capacity,\n common_queue_min=10 * FLAGS.train_batch_size, shuffle=\n FLAGS.train_shuffle)\n [image, gt_bboxes, gt_labels] = provider.get(['image',\n 'gt_bboxes', 'gt_labels'])\n gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64\n )\n if FLAGS.test_discard_difficult_objects:\n [gt_difficult_objects] = provider.get(['difficult_objects'])\n return [image, gt_bboxes, gt_labels, gt_difficult_objects]\n\n def _process_image_PascalVOC(self, directory, name):\n filename = directory + DIRECTORY_IMAGES + name + '.jpg'\n image_data = 
tf.gfile.FastGFile(filename, 'r').read()\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml'\n )\n tree = ET.parse(filename)\n root = tree.getroot()\n size = root.find('size')\n shape = [int(size.find('height').text), int(size.find('width').text\n ), int(size.find('depth').text)]\n bboxes = []\n labels = []\n labels_text = []\n difficult = []\n truncated = []\n for obj in root.findall('object'):\n label = obj.find('name').text\n labels.append(int(VOC_LABELS[label][0]))\n labels_text.append(label.encode('ascii'))\n if obj.find('difficult'):\n difficult.append(int(obj.find('difficult').text))\n else:\n difficult.append(0)\n if obj.find('truncated'):\n truncated.append(int(obj.find('truncated').text))\n else:\n truncated.append(0)\n bbox = obj.find('bndbox')\n bboxes.append((float(bbox.find('ymin').text) / shape[0], float(\n bbox.find('xmin').text) / shape[1], float(bbox.find('ymax')\n .text) / shape[0], float(bbox.find('xmax').text) / shape[1]))\n return (image_data, shape, bboxes, labels, labels_text, difficult,\n truncated)\n\n def _convert_to_example_PascalVOC(self, image_data, labels, labels_text,\n bboxes, shape, difficult, truncated):\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n for b in bboxes:\n assert len(b) == 4\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n image_format = b'JPEG'\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': self.int64_feature(shape[0]), 'image/width':\n self.int64_feature(shape[1]), 'image/channels': self.\n int64_feature(shape[2]), 'image/shape': self.int64_feature(\n shape), 'image/object/bbox/xmin': self.float_feature(xmin),\n 'image/object/bbox/xmax': self.float_feature(xmax),\n 'image/object/bbox/ymin': self.float_feature(ymin),\n 'image/object/bbox/ymax': self.float_feature(ymax),\n 'image/object/bbox/label': self.int64_feature(labels),\n 'image/object/bbox/label_text': self.bytes_feature(labels_text),\n 'image/object/bbox/difficult': self.int64_feature(difficult),\n 'image/object/bbox/truncated': self.int64_feature(truncated),\n 'image/format': self.bytes_feature(image_format),\n 'image/encoded': self.bytes_feature(image_data)}))\n return example\n\n def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):\n (image_data, shape, bboxes, labels, labels_text, difficult, truncated\n ) = self._process_image_PascalVOC(dataset_dir, name)\n example = self._convert_to_example_PascalVOC(image_data, labels,\n labels_text, bboxes, shape, difficult, truncated)\n tfrecord_writer.write(example.SerializeToString())\n\n def _get_output_filename_PascalVOC(output_dir, name, idx):\n return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)\n\n def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train',\n shuffling=False):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)\n filenames = sorted(os.listdir(path))\n if shuffling:\n random.seed(RANDOM_SEED)\n random.shuffle(filenames)\n i = 0\n fidx = 0\n while i < len(filenames):\n tf_filename = self._get_output_filename(output_dir, name, fidx)\n with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:\n j = 0\n while i < len(filenames) and j < SAMPLES_PER_FILES:\n sys.stdout.write('\\r>> Converting image %d/%d' % (i + 1,\n len(filenames)))\n sys.stdout.flush()\n filename = filenames[i]\n img_name = filename[:-4]\n self._add_to_tfrecord_PascalVOC(dataset_dir, img_name,\n tfrecord_writer)\n i += 1\n j += 1\n fidx += 1\n print('\\n 
ImageDB to TF conversion finished. ')\n\n def int64_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n def float_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n def bytes_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n",
"step-4": "<mask token>\nDIRECTORY_ANNOTATIONS = 'Annotations/'\nDIRECTORY_IMAGES = 'JPEGImages/'\nRANDOM_SEED = 4242\nSAMPLES_PER_FILES = 200\nslim = tf.contrib.slim\n\n\nclass Dataset(object):\n\n def __init__(self):\n self.items_descriptions = {'image':\n 'A color image of varying height and width.', 'shape':\n 'Shape of the image', 'object/bbox':\n 'A list of bounding boxes, one per each object.',\n 'object/label': 'A list of labels, one per each object.'}\n self.features = {'image/encoded': tf.FixedLenFeature((), tf.string,\n default_value=''), 'image/format': tf.FixedLenFeature((), tf.\n string, default_value='jpeg'), 'image/object/bbox/xmin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.\n VarLenFeature(dtype=tf.float32), 'image/object/bbox/label': tf.\n VarLenFeature(dtype=tf.int64), 'image/object/bbox/difficult':\n tf.VarLenFeature(dtype=tf.int64)}\n self.items = {'image': slim.tfexample_decoder.Image('image/encoded',\n 'image/format'), 'gt_bboxes': slim.tfexample_decoder.\n BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],\n 'image/object/bbox/'), 'gt_labels': slim.tfexample_decoder.\n Tensor('image/object/bbox/label'), 'difficult_objects': slim.\n tfexample_decoder.Tensor('image/object/bbox/difficult')}\n\n def read_dataset_from_tfrecords(self, dataset_name, train_or_test,\n dataset_path):\n with tf.name_scope(None, 'read_dataset_from_tfrecords') as scope:\n if (dataset_name == 'pascalvoc_2007' or dataset_name ==\n 'pascalvoc_2012'):\n dataset = self.load_dataset(dataset_name, train_or_test,\n dataset_path)\n return dataset\n\n def load_dataset(self, dataset_name, train_or_test, dataset_path):\n dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'\n if dataset_name == 'pascalvoc_2007':\n train_test_sizes = {'train': FLAGS.pascalvoc_2007_train_size,\n 'test': FLAGS.pascalvoc_2007_test_size}\n elif dataset_name == 'pascalvoc_2012':\n train_test_sizes = {'train': FLAGS.pascalvoc_2012_train_size}\n dataset_file_name = os.path.join(dataset_path, dataset_file_name %\n train_or_test)\n reader = tf.TFRecordReader\n decoder = slim.tfexample_decoder.TFExampleDecoder(self.features,\n self.items)\n return slim.dataset.Dataset(data_sources=dataset_file_name, reader=\n reader, decoder=decoder, num_samples=train_test_sizes[\n train_or_test], items_to_descriptions=self.items_descriptions,\n num_classes=FLAGS.num_classes - 1, labels_to_names=None)\n\n def get_groundtruth_from_dataset(self, dataset, train_or_test):\n with tf.name_scope(None, 'get_groundtruth_from_dataset') as scope:\n if train_or_test == 'test':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.test_num_readers,\n common_queue_capacity=FLAGS.test_common_queue_capacity,\n common_queue_min=FLAGS.test_batch_size, shuffle=FLAGS.\n test_shuffle)\n elif train_or_test == 'train':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, num_readers=FLAGS.train_num_readers,\n common_queue_capacity=FLAGS.train_common_queue_capacity,\n common_queue_min=10 * FLAGS.train_batch_size, shuffle=\n FLAGS.train_shuffle)\n [image, gt_bboxes, gt_labels] = provider.get(['image',\n 'gt_bboxes', 'gt_labels'])\n gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64\n )\n if FLAGS.test_discard_difficult_objects:\n [gt_difficult_objects] = provider.get(['difficult_objects'])\n return [image, gt_bboxes, gt_labels, gt_difficult_objects]\n\n 
def _process_image_PascalVOC(self, directory, name):\n filename = directory + DIRECTORY_IMAGES + name + '.jpg'\n image_data = tf.gfile.FastGFile(filename, 'r').read()\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml'\n )\n tree = ET.parse(filename)\n root = tree.getroot()\n size = root.find('size')\n shape = [int(size.find('height').text), int(size.find('width').text\n ), int(size.find('depth').text)]\n bboxes = []\n labels = []\n labels_text = []\n difficult = []\n truncated = []\n for obj in root.findall('object'):\n label = obj.find('name').text\n labels.append(int(VOC_LABELS[label][0]))\n labels_text.append(label.encode('ascii'))\n if obj.find('difficult'):\n difficult.append(int(obj.find('difficult').text))\n else:\n difficult.append(0)\n if obj.find('truncated'):\n truncated.append(int(obj.find('truncated').text))\n else:\n truncated.append(0)\n bbox = obj.find('bndbox')\n bboxes.append((float(bbox.find('ymin').text) / shape[0], float(\n bbox.find('xmin').text) / shape[1], float(bbox.find('ymax')\n .text) / shape[0], float(bbox.find('xmax').text) / shape[1]))\n return (image_data, shape, bboxes, labels, labels_text, difficult,\n truncated)\n\n def _convert_to_example_PascalVOC(self, image_data, labels, labels_text,\n bboxes, shape, difficult, truncated):\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n for b in bboxes:\n assert len(b) == 4\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n image_format = b'JPEG'\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': self.int64_feature(shape[0]), 'image/width':\n self.int64_feature(shape[1]), 'image/channels': self.\n int64_feature(shape[2]), 'image/shape': self.int64_feature(\n shape), 'image/object/bbox/xmin': self.float_feature(xmin),\n 'image/object/bbox/xmax': self.float_feature(xmax),\n 'image/object/bbox/ymin': self.float_feature(ymin),\n 'image/object/bbox/ymax': self.float_feature(ymax),\n 'image/object/bbox/label': self.int64_feature(labels),\n 'image/object/bbox/label_text': self.bytes_feature(labels_text),\n 'image/object/bbox/difficult': self.int64_feature(difficult),\n 'image/object/bbox/truncated': self.int64_feature(truncated),\n 'image/format': self.bytes_feature(image_format),\n 'image/encoded': self.bytes_feature(image_data)}))\n return example\n\n def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):\n (image_data, shape, bboxes, labels, labels_text, difficult, truncated\n ) = self._process_image_PascalVOC(dataset_dir, name)\n example = self._convert_to_example_PascalVOC(image_data, labels,\n labels_text, bboxes, shape, difficult, truncated)\n tfrecord_writer.write(example.SerializeToString())\n\n def _get_output_filename_PascalVOC(output_dir, name, idx):\n return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)\n\n def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train',\n shuffling=False):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)\n filenames = sorted(os.listdir(path))\n if shuffling:\n random.seed(RANDOM_SEED)\n random.shuffle(filenames)\n i = 0\n fidx = 0\n while i < len(filenames):\n tf_filename = self._get_output_filename(output_dir, name, fidx)\n with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:\n j = 0\n while i < len(filenames) and j < SAMPLES_PER_FILES:\n sys.stdout.write('\\r>> Converting image %d/%d' % (i + 1,\n len(filenames)))\n sys.stdout.flush()\n filename = filenames[i]\n img_name = 
filename[:-4]\n self._add_to_tfrecord_PascalVOC(dataset_dir, img_name,\n tfrecord_writer)\n i += 1\n j += 1\n fidx += 1\n print('\\n ImageDB to TF conversion finished. ')\n\n def int64_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n def float_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n def bytes_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n",
"step-5": "\n# This module is used to load pascalvoc datasets (2007 or 2012)\nimport os\nimport tensorflow as tf\nfrom configs.config_common import *\nfrom configs.config_train import *\nfrom configs.config_test import *\nimport sys\nimport random\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\n# Original dataset organisation.\nDIRECTORY_ANNOTATIONS = 'Annotations/'\nDIRECTORY_IMAGES = 'JPEGImages/'\n\n# TFRecords convertion parameters.\nRANDOM_SEED = 4242\nSAMPLES_PER_FILES = 200\n\nslim = tf.contrib.slim\n\n\n\nclass Dataset(object):\n\n def __init__(self):\n # Descriptions of the image items\n self.items_descriptions = {\n 'image': 'A color image of varying height and width.',\n 'shape': 'Shape of the image',\n 'object/bbox': 'A list of bounding boxes, one per each object.',\n 'object/label': 'A list of labels, one per each object.',\n }\n # Features of Pascal VOC TFRecords.\n self.features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),\n 'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),\n }\n # Items in Pascal VOC TFRecords.\n self.items = {\n 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),\n 'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),\n 'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),\n 'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),\n }\n\n\n\n # This function reads dataset from tfrecords\n # Inputs:\n # datase_name: pascalvoc_2007\n # train_or_test: test\n # dataset_path: './tfrecords_test/'\n # Outputs:\n # loaded dataset\n def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):\n with tf.name_scope(None, \"read_dataset_from_tfrecords\") as scope:\n if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':\n dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)\n return dataset\n\n\n\n # This function is used to load pascalvoc2007 or psaclvoc2012 datasets\n # Inputs:\n # dataset_name: pascalvoc_2007\n # train_or_test: test\n # dataset_path: './tfrecords_test/'\n # Output:\n # loaded dataset \n def load_dataset(self, dataset_name, train_or_test, dataset_path):\n dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'\n if dataset_name == 'pascalvoc_2007':\n train_test_sizes = {\n 'train': FLAGS.pascalvoc_2007_train_size,\n 'test': FLAGS.pascalvoc_2007_test_size,\n }\n elif dataset_name == 'pascalvoc_2012':\n train_test_sizes = {\n 'train': FLAGS.pascalvoc_2012_train_size, \n } \n dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)\n reader = tf.TFRecordReader\n decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)\n return slim.dataset.Dataset(\n data_sources=dataset_file_name,\n reader=reader,\n decoder=decoder,\n num_samples=train_test_sizes[train_or_test],\n items_to_descriptions=self.items_descriptions,\n num_classes=FLAGS.num_classes-1,\n labels_to_names=None)\n\n\n\n # This function gets groundtruth bboxes & labels from dataset\n # Inputs:\n # dataset\n # train_or_test: train/test\n # 
Output:\n # image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects\n def get_groundtruth_from_dataset(self, dataset, train_or_test):\n # Dataset provider\n with tf.name_scope(None, \"get_groundtruth_from_dataset\") as scope:\n if train_or_test == 'test':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers=FLAGS.test_num_readers,\n common_queue_capacity=FLAGS.test_common_queue_capacity,\n common_queue_min=FLAGS.test_batch_size,\n shuffle=FLAGS.test_shuffle)\n elif train_or_test == 'train':\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers= FLAGS.train_num_readers,\n common_queue_capacity= FLAGS.train_common_queue_capacity,\n common_queue_min= 10 * FLAGS.train_batch_size,\n shuffle=FLAGS.train_shuffle)\n # Get images, groundtruth bboxes & groundtruth labels from database\n [image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])\n # Discard difficult objects\n gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)\n if FLAGS.test_discard_difficult_objects:\n [gt_difficult_objects] = provider.get(['difficult_objects'])\n return [image, gt_bboxes, gt_labels, gt_difficult_objects]\n\n\n\n\n\n ##########################################\n # Convert PascalVOC to TF recorsd\n # Process a image and annotation file.\n # Inputs:\n # filename: string, path to an image file e.g., '/path/to/example.JPG'.\n # coder: instance of ImageCoder to provide TensorFlow image coding utils.\n # Outputs:\n # image_buffer: string, JPEG encoding of RGB image.\n # height: integer, image height in pixels.\n # width: integer, image width in pixels.\n def _process_image_PascalVOC(self, directory, name):\n\n # Read the image file.\n filename = directory + DIRECTORY_IMAGES + name + '.jpg'\n image_data = tf.gfile.FastGFile(filename, 'r').read()\n\n # Read the XML annotation file.\n filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')\n tree = ET.parse(filename)\n root = tree.getroot()\n\n # Image shape.\n size = root.find('size')\n shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]\n # Find annotations.\n bboxes = []\n labels = []\n labels_text = []\n difficult = []\n truncated = []\n for obj in root.findall('object'):\n label = obj.find('name').text\n labels.append(int(VOC_LABELS[label][0]))\n labels_text.append(label.encode('ascii'))\n\n if obj.find('difficult'):\n difficult.append(int(obj.find('difficult').text))\n else:\n difficult.append(0)\n if obj.find('truncated'):\n truncated.append(int(obj.find('truncated').text))\n else:\n truncated.append(0)\n\n bbox = obj.find('bndbox')\n bboxes.append((float(bbox.find('ymin').text) / shape[0],\n float(bbox.find('xmin').text) / shape[1],\n float(bbox.find('ymax').text) / shape[0],\n float(bbox.find('xmax').text) / shape[1]\n ))\n return image_data, shape, bboxes, labels, labels_text, difficult, truncated\n\n\n\n\n # Build an Example proto for an image example.\n # Args:\n # image_data: string, JPEG encoding of RGB image;\n # labels: list of integers, identifier for the ground truth;\n # labels_text: list of strings, human-readable labels;\n # bboxes: list of bounding boxes; each box is a list of integers;\n # shape: 3 integers, image shapes in pixels.\n # Returns:\n # Example proto\n def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n for b in bboxes:\n assert len(b) == 4\n 
# pylint: disable=expression-not-assigned\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n # pylint: enable=expression-not-assigned\n\n image_format = b'JPEG'\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': self.int64_feature(shape[0]),\n 'image/width': self.int64_feature(shape[1]),\n 'image/channels': self.int64_feature(shape[2]),\n 'image/shape': self.int64_feature(shape),\n 'image/object/bbox/xmin': self.float_feature(xmin),\n 'image/object/bbox/xmax': self.float_feature(xmax),\n 'image/object/bbox/ymin': self.float_feature(ymin),\n 'image/object/bbox/ymax': self.float_feature(ymax),\n 'image/object/bbox/label': self.int64_feature(labels),\n 'image/object/bbox/label_text': self.bytes_feature(labels_text),\n 'image/object/bbox/difficult': self.int64_feature(difficult),\n 'image/object/bbox/truncated': self.int64_feature(truncated),\n 'image/format': self.bytes_feature(image_format),\n 'image/encoded': self.bytes_feature(image_data)}))\n return example\n\n\n\n # Loads data from image and annotations files and add them to a TFRecord.\n # Inputs:\n # dataset_dir: Dataset directory;\n # name: Image name to add to the TFRecord;\n # tfrecord_writer: The TFRecord writer to use for writing.\n def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):\n\n image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)\n example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)\n tfrecord_writer.write(example.SerializeToString())\n\n\n\n def _get_output_filename_PascalVOC(output_dir, name, idx):\n return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)\n\n\n\n # Convert images to tfrecords\n # Args:\n # dataset_dir: The dataset directory where the dataset is stored.\n # output_dir: Output directory.\n def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):\n\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n # Dataset filenames, and shuffling.\n path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)\n filenames = sorted(os.listdir(path))\n if shuffling:\n random.seed(RANDOM_SEED)\n random.shuffle(filenames)\n # Process dataset files.\n i = 0\n fidx = 0\n while i < len(filenames):\n # Open new TFRecord file.\n tf_filename = self._get_output_filename(output_dir, name, fidx)\n with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:\n j = 0\n while i < len(filenames) and j < SAMPLES_PER_FILES:\n sys.stdout.write('\\r>> Converting image %d/%d' % (i+1, len(filenames)))\n sys.stdout.flush()\n\n filename = filenames[i]\n img_name = filename[:-4]\n self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)\n i += 1\n j += 1\n fidx += 1\n print('\\n ImageDB to TF conversion finished. 
')\n\n\n\n # Wrapper for inserting int64 features into Example proto.\n def int64_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\n # Wrapper for inserting float features into Example proto.\n def float_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\n # Wrapper for inserting bytes features into Example proto.\n def bytes_feature(self, value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n\n\n\n\n\n \n\n\n",
"step-ids": [
11,
12,
13,
14,
16
]
}
|
[11, 12, 13, 14, 16] |
<mask token>
class VelocityLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(VelocityLoss, self).__init__()
self._mean = _mean
self._std = _std
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.velocity_dim = config.velocity_dim
self.vel_factor_dim = config.vel_factor_dim
def calculate_velocity(self, src_pos_seq, src_init_pos):
"""
        :param src_pos_seq: positions of the predicted sequence [batch_size, seq_length, J * 3]
        :param src_init_pos: positions of the initial frame [batch_size, J * 3]
        :return: per-frame velocities, same shape as src_pos_seq
"""
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
velocity = temp_positions[:, 1:] - temp_positions[:, :-1]
return velocity
    <mask token>
def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim
] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:
self.pos_dim + self.root_pos_dim]
src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim
] + self._mean[:self.pos_dim + self.root_pos_dim]
train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)
_train_velocity = (train_velocity - self._mean[-(self.velocity_dim +
self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.
velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]
train_vel_factor = self.get_vel_factor(train_velocity)
_train_vel_factor = (train_vel_factor - self._mean[-self.
vel_factor_dim:]) / self._std[-self.vel_factor_dim:]
MSE_loss = nn.MSELoss()
zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape
).to(self.device)
loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],
_train_velocity[:, 1:, :]) * 10 + MSE_loss(predict_seq[:, 0, -
self.velocity_dim:], zero_seq) * 20
loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,
1:, :]) * 10
velocity_loss = loss1 * 2 + loss2 * 1.5
return velocity_loss
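# Note on VelocityLoss.forward: loss1 matches the network's dedicated
# velocity channels against velocities finite-differenced from its own
# predicted positions, and pins frame 0 (which has no predecessor) to zero;
# loss2 matches the per-part velocity factors derived from those velocities
# against the ground-truth factors. Both comparisons happen in normalized
# space, using the velocity and factor slices of _mean/_std.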
class ContactLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(ContactLoss, self).__init__()
self._mean = _mean
self._std = _std
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.contact_dim = config.contact_dim
self.velocity_dim = config.velocity_dim
self.left_feet = config.left_foot
self.right_feet = config.right_foot
self.vel_factor_dim = config.vel_factor_dim
self.contact_loc = (self.contact_dim + self.velocity_dim + self.
vel_factor_dim)
def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,
right_foot):
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[
0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot
[0] * 3 + 3]) ** 2
left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)
left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[
1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot
[1] * 3 + 3]) ** 2
left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)
right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:
right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *
3:right_foot[0] * 3 + 3]) ** 2
right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)
right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:
right_foot[1] * 3 + 3] - temp_positions[:, :-1, right_foot[1] *
3:right_foot[1] * 3 + 3]) ** 2
right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)
feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,
right_foot0_vel, right_foot1_vel), -1)
return feet_vel
def forward(self, predict_seq, _train_x1, _train_x2):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim
] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:
self.pos_dim + self.root_pos_dim]
src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim
] + self._mean[:self.pos_dim + self.root_pos_dim]
feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,
self.left_feet, self.right_feet)
feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self
.velocity_dim):-self.velocity_dim] * self._std[-self.
contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.
_mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]
)
contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *
feet_vels, dim=-1), dim=-1))
return contact_loss * 2
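# Note on ContactLoss.forward: feet_vels holds the squared per-frame
# displacement of the four foot joints and feet_contact the (de-normalized)
# contact flags; their product is large only when a foot moves while flagged
# as planted, so minimizing it suppresses foot sliding.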
class KeyframeLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
self.key_num = config.key_num
def forward(self, predict_seq, _train_x1, gt_seq):
key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +
self.root_rot_dim]
key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self
.root_rot_dim]
predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +
self.root_rot_dim]
num = predict_pos.shape[1]
MSE_loss = nn.MSELoss()
loss = torch.zeros([]).to(self.device)
if num <= self.key_num * 2:
for i in range(num):
t = (i + 1) / (num + 1)
pos = predict_pos[:, i, :]
loss = loss + (1 - t) * MSE_loss(pos, key_frame1
) + t * MSE_loss(pos, key_frame2)
else:
for i in range(self.key_num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)
for i in range(num - self.key_num, num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)
return loss * 2
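# A runnable sketch of KeyframeLoss on toy tensors. The dimensions and the
# SimpleNamespace config are illustrative assumptions, not the project's
# real configuration.
def _demo_keyframe_loss():
    import types
    cfg = types.SimpleNamespace(device=torch.device('cpu'), pos_dim=69,
                                root_pos_dim=3, root_rot_dim=4, key_num=2)
    crit = KeyframeLoss(cfg)
    B, T, D = 2, 10, 69 + 3 + 4
    pred = torch.randn(B, T, D)
    x1 = torch.randn(B, T, D)   # frame 0 supplies the first keyframe
    gt = torch.randn(B, T, D)   # frame -1 supplies the last keyframe
    # with T > 2 * key_num, only the first/last key_num frames are anchored
    return crit(pred, x1, gt)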
class SmoothLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
def forward(self, predict_seq, _train_x1, gt_seq):
init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.
root_pos_dim]
init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.
root_pos_dim]
root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.
root_pos_dim]
last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self
.pos_dim + self.root_pos_dim + self.root_rot_dim]
seq_num = len(root_pos_seq[0])
batch_size = len(root_pos_seq)
root_pos_item = torch.zeros([]).to(self.device)
root_rot_item = torch.zeros([]).to(self.device)
MSE_loss = nn.MSELoss()
for idx in range(seq_num):
if idx == 0:
root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],
init_root_pos[:])
root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],
init_root_rot[:])
elif idx == seq_num - 1:
root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos
) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)
root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot
) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)
else:
root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -
root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num
root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -
root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num
root_pos_item = root_pos_item + root_pos_temp
root_rot_item = root_rot_item + root_rot_temp
loss = root_pos_item + root_rot_item
return loss * 1.5
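# Note on SmoothLoss.forward: the first predicted frame is tied to the input
# pose, the last two frames to the target keyframe, and every interior frame
# to its predecessor, which discourages jumps in root position and rotation.
# Mind the scaling: the interior terms are element sums divided by
# batch_size * seq_num, while the endpoint terms use nn.MSELoss (a mean over
# elements), so the two kinds of terms sit on different scales.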
<|reserved_special_token_1|>
<mask token>
class BoneLoss(nn.Module):
def __init__(self, gt_bone_length, parents, _mean, _std, config):
super(BoneLoss, self).__init__()
self.gt_bone_length = gt_bone_length
self.parents = parents
self._mean = _mean
self._std = _std
self.device = config.device
self.pos_dim = config.pos_dim
    <mask token>
def forward(self, predict_seq, _train_x1, _train_x2):
train_bone_length = self.calculate_bone_length_for_seq(predict_seq)
_, gt_bone_length = torch.broadcast_tensors(train_bone_length, self
.gt_bone_length)
MSE_loss = nn.MSELoss()
bone_loss = MSE_loss(train_bone_length, gt_bone_length)
return bone_loss * 2
class VelocityLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(VelocityLoss, self).__init__()
self._mean = _mean
self._std = _std
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.velocity_dim = config.velocity_dim
self.vel_factor_dim = config.vel_factor_dim
def calculate_velocity(self, src_pos_seq, src_init_pos):
"""
        :param src_pos_seq: positions of the predicted sequence [batch_size, seq_length, J * 3]
        :param src_init_pos: positions of the initial frame [batch_size, J * 3]
        :return: per-frame velocities, same shape as src_pos_seq
"""
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
velocity = temp_positions[:, 1:] - temp_positions[:, :-1]
return velocity
def get_vel_factor(self, velocity):
batch_size = velocity.shape[0]
seq_len = velocity.shape[1]
joint_num = int(velocity.shape[-1] / 3)
weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2,
3, 4, 1, 2, 1]
parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4,
4, 0, 0, 0]
weight_sum = []
for part in range(5):
p_sum = 0
for j in range(joint_num):
if parts[j] == part:
p_sum += weight[j]
weight_sum.append(p_sum)
vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim),
dtype=torch.float32).to(self.device)
for i in range(seq_len):
factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=
torch.float32).to(self.device)
for part in range(5):
for j in range(joint_num):
if parts[j] == part:
factor[:, part:part + 1] = factor[:, part:part + 1
] + weight[j] / weight_sum[part] * pow(pow(
velocity[:, i:i + 1, j * 3], 2) + pow(velocity[
:, i:i + 1, j * 3 + 1], 2) + pow(velocity[:, i:
i + 1, j * 3 + 2], 2), 0.5)
vel_factor[:, i, :] = factor
return vel_factor
def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim
] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:
self.pos_dim + self.root_pos_dim]
src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim
] + self._mean[:self.pos_dim + self.root_pos_dim]
train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)
_train_velocity = (train_velocity - self._mean[-(self.velocity_dim +
self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.
velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]
train_vel_factor = self.get_vel_factor(train_velocity)
_train_vel_factor = (train_vel_factor - self._mean[-self.
vel_factor_dim:]) / self._std[-self.vel_factor_dim:]
MSE_loss = nn.MSELoss()
zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape
).to(self.device)
loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],
_train_velocity[:, 1:, :]) * 10 + MSE_loss(predict_seq[:, 0, -
self.velocity_dim:], zero_seq) * 20
loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,
1:, :]) * 10
velocity_loss = loss1 * 2 + loss2 * 1.5
return velocity_loss
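# A runnable sketch of VelocityLoss on toy tensors. All sizes below are
# assumptions chosen only to satisfy the slicing in forward() (24 joints ->
# 72 position and 72 velocity channels, 5 velocity-factor channels); the real
# feature layout and normalization statistics come from the dataset, not
# from the zeros/ones used here.
def _demo_velocity_loss():
    import types
    cfg = types.SimpleNamespace(device=torch.device('cpu'), pos_dim=69,
                                root_pos_dim=3, velocity_dim=72,
                                vel_factor_dim=5)
    D_stats = 72 + 72 + 5               # [pos + root_pos | velocity | vel_factor]
    crit = VelocityLoss(torch.zeros(D_stats), torch.ones(D_stats), cfg)
    B, T = 2, 8
    pred = torch.randn(B, T, 72 + 72)   # positions first, velocity channels last
    x1 = torch.randn(B, T, 72)          # only frame 0 is used as the init pose
    true_vf = torch.randn(B, T + 1, 5)  # one extra frame, matching [:, 1:-1]
    return crit(pred, x1, None, true_vf)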
class ContactLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(ContactLoss, self).__init__()
self._mean = _mean
self._std = _std
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.contact_dim = config.contact_dim
self.velocity_dim = config.velocity_dim
self.left_feet = config.left_foot
self.right_feet = config.right_foot
self.vel_factor_dim = config.vel_factor_dim
self.contact_loc = (self.contact_dim + self.velocity_dim + self.
vel_factor_dim)
def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,
right_foot):
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[
0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot
[0] * 3 + 3]) ** 2
left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)
left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[
1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot
[1] * 3 + 3]) ** 2
left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)
right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:
right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *
3:right_foot[0] * 3 + 3]) ** 2
right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)
right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:
right_foot[1] * 3 + 3] - temp_positions[:, :-1, right_foot[1] *
3:right_foot[1] * 3 + 3]) ** 2
right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)
feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,
right_foot0_vel, right_foot1_vel), -1)
return feet_vel
def forward(self, predict_seq, _train_x1, _train_x2):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim
] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:
self.pos_dim + self.root_pos_dim]
src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim
] + self._mean[:self.pos_dim + self.root_pos_dim]
feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,
self.left_feet, self.right_feet)
feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self
.velocity_dim):-self.velocity_dim] * self._std[-self.
contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.
_mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]
)
contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *
feet_vels, dim=-1), dim=-1))
return contact_loss * 2
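# A runnable sketch of ContactLoss. The foot joint indices, channel sizes,
# and zero/one statistics below are illustrative assumptions; in the real
# pipeline they come from the skeleton definition and dataset statistics.
def _demo_contact_loss():
    import types
    cfg = types.SimpleNamespace(root_pos_dim=3, pos_dim=69, contact_dim=4,
                                velocity_dim=72, vel_factor_dim=5,
                                left_foot=[3, 4], right_foot=[7, 8])
    D_stats = 72 + 4 + 72 + 5           # [pos + root | contact | velocity | vel_factor]
    crit = ContactLoss(torch.zeros(D_stats), torch.ones(D_stats), cfg)
    B, T = 2, 8
    pred = torch.randn(B, T, 72 + 4 + 72)  # [positions | contact | velocity]
    x1 = torch.randn(B, T, 72)
    return crit(pred, x1, None)         # penalizes foot motion while in contact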
class KeyframeLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
self.key_num = config.key_num
def forward(self, predict_seq, _train_x1, gt_seq):
key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +
self.root_rot_dim]
key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self
.root_rot_dim]
predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +
self.root_rot_dim]
num = predict_pos.shape[1]
MSE_loss = nn.MSELoss()
loss = torch.zeros([]).to(self.device)
if num <= self.key_num * 2:
for i in range(num):
t = (i + 1) / (num + 1)
pos = predict_pos[:, i, :]
loss = loss + (1 - t) * MSE_loss(pos, key_frame1
) + t * MSE_loss(pos, key_frame2)
else:
for i in range(self.key_num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)
for i in range(num - self.key_num, num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)
return loss * 2
class SmoothLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
def forward(self, predict_seq, _train_x1, gt_seq):
init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.
root_pos_dim]
init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.
root_pos_dim]
root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.
root_pos_dim]
last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self
.pos_dim + self.root_pos_dim + self.root_rot_dim]
seq_num = len(root_pos_seq[0])
batch_size = len(root_pos_seq)
root_pos_item = torch.zeros([]).to(self.device)
root_rot_item = torch.zeros([]).to(self.device)
MSE_loss = nn.MSELoss()
for idx in range(seq_num):
if idx == 0:
root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],
init_root_pos[:])
root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],
init_root_rot[:])
elif idx == seq_num - 1:
root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos
) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)
root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot
) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)
else:
root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -
root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num
root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -
root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num
root_pos_item = root_pos_item + root_pos_temp
root_rot_item = root_rot_item + root_rot_temp
loss = root_pos_item + root_rot_item
return loss * 1.5
<|reserved_special_token_1|>
<mask token>
class BoneLoss(nn.Module):
def __init__(self, gt_bone_length, parents, _mean, _std, config):
super(BoneLoss, self).__init__()
self.gt_bone_length = gt_bone_length
self.parents = parents
self._mean = _mean
self._std = _std
self.device = config.device
self.pos_dim = config.pos_dim
def calculate_bone_length_for_seq(self, seq):
src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim
] + self._mean[:self.pos_dim]
new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(
src_seq.shape[2] / 3), 3)
root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.
device)
root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)
root_positions = root_positions.repeat(src_seq.shape[0], src_seq.
shape[1], 1, 1)
positions = torch.cat((root_positions, new_seq), 2)
result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(
src_seq.shape[2] / 3)), dtype=torch.float32).to(self.device)
index = 0
for joint, parent in enumerate(self.parents):
if parent == -1:
continue
joint_pos = positions[:, :, joint]
parent_pos = positions[:, :, parent]
delta_x = joint_pos[..., 0] - parent_pos[..., 0]
delta_y = joint_pos[..., 1] - parent_pos[..., 1]
delta_z = joint_pos[..., 2] - parent_pos[..., 2]
length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5
result_list[..., index] = length_temp
index += 1
return result_list
def forward(self, predict_seq, _train_x1, _train_x2):
train_bone_length = self.calculate_bone_length_for_seq(predict_seq)
_, gt_bone_length = torch.broadcast_tensors(train_bone_length, self
.gt_bone_length)
MSE_loss = nn.MSELoss()
bone_loss = MSE_loss(train_bone_length, gt_bone_length)
return bone_loss * 2
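# A runnable sketch of BoneLoss with a toy chain skeleton. The parents table
# and target bone lengths here are placeholders; the real ones come from the
# rest pose of the training skeleton.
def _demo_bone_loss():
    import types
    cfg = types.SimpleNamespace(device=torch.device('cpu'), pos_dim=69)
    parents = [-1] + list(range(23))     # joint j hangs off joint j - 1
    gt_len = torch.ones(23)              # 23 bones for a 24-joint skeleton
    crit = BoneLoss(gt_len, parents, torch.zeros(69), torch.ones(69), cfg)
    B, T = 2, 8
    pred = torch.randn(B, T, 69)         # only the first pos_dim channels are read
    return crit(pred, None, None)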
class VelocityLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(VelocityLoss, self).__init__()
self._mean = _mean
self._std = _std
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.velocity_dim = config.velocity_dim
self.vel_factor_dim = config.vel_factor_dim
def calculate_velocity(self, src_pos_seq, src_init_pos):
"""
        :param src_pos_seq: positions of the predicted sequence [batch_size, seq_length, J * 3]
        :param src_init_pos: positions of the initial frame [batch_size, J * 3]
        :return: per-frame velocities, same shape as src_pos_seq
"""
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
velocity = temp_positions[:, 1:] - temp_positions[:, :-1]
return velocity
def get_vel_factor(self, velocity):
batch_size = velocity.shape[0]
seq_len = velocity.shape[1]
joint_num = int(velocity.shape[-1] / 3)
weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2,
3, 4, 1, 2, 1]
parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4,
4, 0, 0, 0]
weight_sum = []
for part in range(5):
p_sum = 0
for j in range(joint_num):
if parts[j] == part:
p_sum += weight[j]
weight_sum.append(p_sum)
vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim),
dtype=torch.float32).to(self.device)
for i in range(seq_len):
factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=
torch.float32).to(self.device)
for part in range(5):
for j in range(joint_num):
if parts[j] == part:
factor[:, part:part + 1] = factor[:, part:part + 1
] + weight[j] / weight_sum[part] * pow(pow(
velocity[:, i:i + 1, j * 3], 2) + pow(velocity[
:, i:i + 1, j * 3 + 1], 2) + pow(velocity[:, i:
i + 1, j * 3 + 2], 2), 0.5)
vel_factor[:, i, :] = factor
return vel_factor
def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim
] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:
self.pos_dim + self.root_pos_dim]
src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim
] + self._mean[:self.pos_dim + self.root_pos_dim]
train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)
_train_velocity = (train_velocity - self._mean[-(self.velocity_dim +
self.vel_factor_dim):-self.vel_factor_dim]) / self._std[-(self.
velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]
train_vel_factor = self.get_vel_factor(train_velocity)
_train_vel_factor = (train_vel_factor - self._mean[-self.
vel_factor_dim:]) / self._std[-self.vel_factor_dim:]
MSE_loss = nn.MSELoss()
zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape
).to(self.device)
loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:],
_train_velocity[:, 1:, :]) * 10 + MSE_loss(predict_seq[:, 0, -
self.velocity_dim:], zero_seq) * 20
loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:,
1:, :]) * 10
velocity_loss = loss1 * 2 + loss2 * 1.5
return velocity_loss
class ContactLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(ContactLoss, self).__init__()
self._mean = _mean
self._std = _std
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.contact_dim = config.contact_dim
self.velocity_dim = config.velocity_dim
self.left_feet = config.left_foot
self.right_feet = config.right_foot
self.vel_factor_dim = config.vel_factor_dim
self.contact_loc = (self.contact_dim + self.velocity_dim + self.
vel_factor_dim)
def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot,
right_foot):
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:left_foot[
0] * 3 + 3] - temp_positions[:, :-1, left_foot[0] * 3:left_foot
[0] * 3 + 3]) ** 2
left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)
left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:left_foot[
1] * 3 + 3] - temp_positions[:, :-1, left_foot[1] * 3:left_foot
[1] * 3 + 3]) ** 2
left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)
right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:
right_foot[0] * 3 + 3] - temp_positions[:, :-1, right_foot[0] *
3:right_foot[0] * 3 + 3]) ** 2
right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)
right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:
right_foot[1] * 3 + 3] - temp_positions[:, :-1, right_foot[1] *
3:right_foot[1] * 3 + 3]) ** 2
right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)
feet_vel = torch.cat((left_foot0_vel, left_foot1_vel,
right_foot0_vel, right_foot1_vel), -1)
return feet_vel
def forward(self, predict_seq, _train_x1, _train_x2):
init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
src_pos_seq = predict_seq[..., :self.pos_dim + self.root_pos_dim
] * self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:
self.pos_dim + self.root_pos_dim]
src_init_pos = init_pos * self._std[:self.pos_dim + self.root_pos_dim
] + self._mean[:self.pos_dim + self.root_pos_dim]
feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos,
self.left_feet, self.right_feet)
feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self
.velocity_dim):-self.velocity_dim] * self._std[-self.
contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + self.
_mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)]
)
contact_loss = torch.mean(torch.sum(torch.sum(feet_contact *
feet_vels, dim=-1), dim=-1))
return contact_loss * 2
class KeyframeLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
self.key_num = config.key_num
def forward(self, predict_seq, _train_x1, gt_seq):
key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim +
self.root_rot_dim]
key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self
.root_rot_dim]
predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim +
self.root_rot_dim]
num = predict_pos.shape[1]
MSE_loss = nn.MSELoss()
loss = torch.zeros([]).to(self.device)
if num <= self.key_num * 2:
for i in range(num):
t = (i + 1) / (num + 1)
pos = predict_pos[:, i, :]
loss = loss + (1 - t) * MSE_loss(pos, key_frame1
) + t * MSE_loss(pos, key_frame2)
else:
for i in range(self.key_num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)
for i in range(num - self.key_num, num):
loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)
return loss * 2
class SmoothLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.root_rot_dim = config.root_rot_dim
self.pos_dim = config.pos_dim
def forward(self, predict_seq, _train_x1, gt_seq):
init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.
root_pos_dim]
init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.
root_pos_dim]
root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:
self.pos_dim + self.root_pos_dim + self.root_rot_dim]
last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.
root_pos_dim]
last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:self
.pos_dim + self.root_pos_dim + self.root_rot_dim]
seq_num = len(root_pos_seq[0])
batch_size = len(root_pos_seq)
root_pos_item = torch.zeros([]).to(self.device)
root_rot_item = torch.zeros([]).to(self.device)
MSE_loss = nn.MSELoss()
for idx in range(seq_num):
if idx == 0:
root_pos_temp = MSE_loss(root_pos_seq[:, :1, :],
init_root_pos[:])
root_rot_temp = MSE_loss(root_rot_seq[:, :1, :],
init_root_rot[:])
elif idx == seq_num - 1:
root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos
) + MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)
root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot
) + MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)
else:
root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -
root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num
root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -
root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num
root_pos_item = root_pos_item + root_pos_temp
root_rot_item = root_rot_item + root_rot_temp
loss = root_pos_item + root_rot_item
return loss * 1.5
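# Design note: get_vel_factor above runs a Python loop over frames, parts,
# and joints. An equivalent vectorized form (a sketch under the same weight
# and parts tables) computes per-joint speeds once and aggregates them into
# parts with a single matrix product:
def _vel_factor_vectorized(velocity, device):
    weight = torch.tensor([1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3,
                           4, 1, 2, 3, 4, 1, 2, 1], dtype=torch.float32,
                          device=device)
    parts = torch.tensor([1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3,
                          3, 4, 4, 4, 4, 0, 0, 0], device=device)
    # one-hot part assignment weighted by normalized joint weights
    assign = torch.zeros(24, 5, device=device)
    assign[torch.arange(24), parts] = weight
    assign = assign / assign.sum(dim=0, keepdim=True)
    speeds = velocity.view(*velocity.shape[:-1], 24, 3).norm(dim=-1)
    return speeds @ assign               # [batch, seq_len, 5]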
<|reserved_special_token_1|>
import torch
import torch.nn as nn
class ReconstructionLoss(nn.Module):
def __init__(self, config):
super(ReconstructionLoss, self).__init__()
self.velocity_dim = config.velocity_dim
def forward(self, pre_seq, gt_seq):
MSE_loss = nn.MSELoss()
rec_loss = MSE_loss(pre_seq[:, 1:-1, :], gt_seq[:, 1:-1, :]
) + MSE_loss(pre_seq[:, -1, :], gt_seq[:, -1, :]) + MSE_loss(
pre_seq[:, 0, :-self.velocity_dim], gt_seq[:, 0, :-self.
velocity_dim])
return rec_loss * 3
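# Note on ReconstructionLoss.forward: interior frames and the final frame are
# matched on every channel, but frame 0 is compared only on its non-velocity
# channels (pre_seq[:, 0, :-velocity_dim]). The VelocityLoss defined below
# instead drives frame 0's velocity channels toward zero, since that frame
# has no predecessor, keeping the two losses consistent.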
class BoneLoss(nn.Module):
def __init__(self, gt_bone_length, parents, _mean, _std, config):
super(BoneLoss, self).__init__()
self.gt_bone_length = gt_bone_length
self.parents = parents
self._mean = _mean
self._std = _std
self.device = config.device
self.pos_dim = config.pos_dim
def calculate_bone_length_for_seq(self, seq):
src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim
] + self._mean[:self.pos_dim]
new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(
src_seq.shape[2] / 3), 3)
root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.
device)
root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)
root_positions = root_positions.repeat(src_seq.shape[0], src_seq.
shape[1], 1, 1)
positions = torch.cat((root_positions, new_seq), 2)
result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(
src_seq.shape[2] / 3)), dtype=torch.float32).to(self.device)
index = 0
for joint, parent in enumerate(self.parents):
if parent == -1:
continue
joint_pos = positions[:, :, joint]
parent_pos = positions[:, :, parent]
delta_x = joint_pos[..., 0] - parent_pos[..., 0]
delta_y = joint_pos[..., 1] - parent_pos[..., 1]
delta_z = joint_pos[..., 2] - parent_pos[..., 2]
length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5
result_list[..., index] = length_temp
index += 1
return result_list
def forward(self, predict_seq, _train_x1, _train_x2):
train_bone_length = self.calculate_bone_length_for_seq(predict_seq)
_, gt_bone_length = torch.broadcast_tensors(train_bone_length, self
.gt_bone_length)
MSE_loss = nn.MSELoss()
bone_loss = MSE_loss(train_bone_length, gt_bone_length)
return bone_loss * 2
class VelocityLoss(nn.Module):
def __init__(self, _mean, _std, config):
super(VelocityLoss, self).__init__()
self._mean = _mean
self._std = _std
self.device = config.device
self.root_pos_dim = config.root_pos_dim
self.pos_dim = config.pos_dim
self.velocity_dim = config.velocity_dim
self.vel_factor_dim = config.vel_factor_dim
def calculate_velocity(self, src_pos_seq, src_init_pos):
"""
        :param src_pos_seq: positions of the predicted sequence [batch_size, seq_length, J * 3]
        :param src_init_pos: positions of the initial frame [batch_size, J * 3]
        :return: per-frame velocities, same shape as src_pos_seq
"""
temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1),
src_pos_seq), 1)
velocity = temp_positions[:, 1:] - temp_positions[:, :-1]
return velocity
def get_vel_factor(self, velocity):
batch_size = velocity.shape[0]
seq_len = velocity.shape[1]
joint_num = int(velocity.shape[-1] / 3)
weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2,
3, 4, 1, 2, 1]
parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4,
4, 0, 0, 0]
weight_sum = []
for part in range(5):
p_sum = 0
for j in range(joint_num):
if parts[j] == part:
p_sum += weight[j]
weight_sum.append(p_sum)
vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim),
dtype=torch.float32).to(self.device)
for i in range(seq_len):
factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=
torch.float32).to(self.device)
for part in range(5):
for j in range(joint_num):
if parts[j] == part:
factor[:, part:part + 1] = factor[:, part:part + 1
] + weight[j] / weight_sum[part] * pow(pow(
velocity[:, i:i + 1, j * 3], 2) + pow(velocity[
:, i:i + 1, j * 3 + 1], 2) + pow(velocity[:, i:
i + 1, j * 3 + 2], 2), 0.5)
vel_factor[:, i, :] = factor
return vel_factor

    def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):
        # de-normalize positions before differencing
        init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
        src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *
                       self._std[:self.pos_dim + self.root_pos_dim] +
                       self._mean[:self.pos_dim + self.root_pos_dim])
        src_init_pos = (init_pos * self._std[:self.pos_dim + self.root_pos_dim] +
                        self._mean[:self.pos_dim + self.root_pos_dim])
        train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)
        # re-normalize the derived velocities with the velocity statistics
        _train_velocity = (train_velocity -
                           self._mean[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]) \
            / self._std[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]
        train_vel_factor = self.get_vel_factor(train_velocity)
        _train_vel_factor = (train_vel_factor - self._mean[-self.vel_factor_dim:]) / self._std[-self.vel_factor_dim:]
        MSE_loss = nn.MSELoss()
        zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape).to(self.device)
        loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:], _train_velocity[:, 1:, :]) * 10 \
            + MSE_loss(predict_seq[:, 0, -self.velocity_dim:], zero_seq) * 20
        loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:, 1:, :]) * 10
        velocity_loss = loss1 * 2 + loss2 * 1.5
        return velocity_loss
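
# Note on the slicing above (inferred from the indices used throughout this
# file, not stated explicitly anywhere): each frame's feature vector is assumed
# to be laid out as
#   [ joint positions | root position | ... | velocity ]
# while the _mean/_std statistics additionally carry a trailing vel_factor
# block, so  _mean[-(velocity_dim + vel_factor_dim):-vel_factor_dim]  selects
# the velocity statistics and  _mean[-vel_factor_dim:]  the factor statistics.
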

class ContactLoss(nn.Module):
    def __init__(self, _mean, _std, config):
        super(ContactLoss, self).__init__()
        self._mean = _mean
        self._std = _std
        self.root_pos_dim = config.root_pos_dim
        self.pos_dim = config.pos_dim
        self.contact_dim = config.contact_dim
        self.velocity_dim = config.velocity_dim
        self.left_feet = config.left_foot
        self.right_feet = config.right_foot
        self.vel_factor_dim = config.vel_factor_dim
        self.contact_loc = self.contact_dim + self.velocity_dim + self.vel_factor_dim

    def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot, right_foot):
        # [batch_size, T + 1, J * 3]
        temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)

        def joint_sq_vel(j):
            # squared frame-to-frame displacement magnitude of joint j
            diff = (temp_positions[:, 1:, j * 3:j * 3 + 3] -
                    temp_positions[:, :-1, j * 3:j * 3 + 3]) ** 2
            return torch.sum(diff, -1, keepdim=True)

        feet_vel = torch.cat((joint_sq_vel(left_foot[0]), joint_sq_vel(left_foot[1]),
                              joint_sq_vel(right_foot[0]), joint_sq_vel(right_foot[1])), -1)
        return feet_vel  # [batch_size, T, 4]

    def forward(self, predict_seq, _train_x1, _train_x2):
        init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
        src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *
                       self._std[:self.pos_dim + self.root_pos_dim] +
                       self._mean[:self.pos_dim + self.root_pos_dim])
        src_init_pos = (init_pos * self._std[:self.pos_dim + self.root_pos_dim] +
                        self._mean[:self.pos_dim + self.root_pos_dim])
        feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos, self.left_feet, self.right_feet)
        # contact labels sit at [-(contact_dim + velocity_dim):-velocity_dim] of the
        # prediction; the statistics vectors also carry a trailing vel_factor block,
        # hence the shifted slices into _std/_mean
        feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self.velocity_dim):-self.velocity_dim] *
                                 self._std[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)] +
                                 self._mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)])
        contact_loss = torch.mean(torch.sum(torch.sum(feet_contact * feet_vels, dim=-1), dim=-1))
        return contact_loss * 2
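
# Semantics (inferred from the expression above, not stated in the source):
# feet_vel holds squared per-frame foot displacements, so the product
# feet_contact * feet_vels only penalizes predicted foot contact while the
# corresponding foot is moving; stationary feet are left unconstrained.
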

class KeyframeLoss(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.device = config.device
        self.root_pos_dim = config.root_pos_dim
        self.root_rot_dim = config.root_rot_dim
        self.pos_dim = config.pos_dim
        self.key_num = config.key_num

    def forward(self, predict_seq, _train_x1, gt_seq):
        key_frame1 = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        key_frame2 = gt_seq[:, -1, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        predict_pos = predict_seq[:, :, :self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        num = predict_pos.shape[1]
        MSE_loss = nn.MSELoss()
        loss = torch.zeros([]).to(self.device)
        if num <= self.key_num * 2:
            # short sequences: blend both keyframes with linearly interpolated weights
            for i in range(num):
                t = (i + 1) / (num + 1)
                pos = predict_pos[:, i, :]
                loss = loss + (1 - t) * MSE_loss(pos, key_frame1) + t * MSE_loss(pos, key_frame2)
        else:
            # long sequences: constrain only the first/last key_num frames
            for i in range(self.key_num):
                loss = loss + MSE_loss(predict_pos[:, i, :], key_frame1)
            for i in range(num - self.key_num, num):
                loss = loss + MSE_loss(predict_pos[:, i, :], key_frame2)
        return loss * 2

class SmoothLoss(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.device = config.device
        self.root_pos_dim = config.root_pos_dim
        self.root_rot_dim = config.root_rot_dim
        self.pos_dim = config.pos_dim

    def forward(self, predict_seq, _train_x1, gt_seq):
        init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.root_pos_dim]
        init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:
                                  self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.root_pos_dim]
        root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:
                                   self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.root_pos_dim]
        last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:
                               self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        seq_num = len(root_pos_seq[0])
        batch_size = len(root_pos_seq)
        root_pos_item = torch.zeros([]).to(self.device)
        root_rot_item = torch.zeros([]).to(self.device)
        MSE_loss = nn.MSELoss()
        for idx in range(seq_num):
            if idx == 0:
                # anchor the first frame to the initial root state
                root_pos_temp = MSE_loss(root_pos_seq[:, :1, :], init_root_pos[:])
                root_rot_temp = MSE_loss(root_rot_seq[:, :1, :], init_root_rot[:])
            elif idx == seq_num - 1:
                # anchor the last two frames to the ground-truth end state
                root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos) + \
                    MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)
                root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot) + \
                    MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)
            else:
                # penalize frame-to-frame jumps in the interior of the sequence
                root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] -
                                                    root_pos_seq[:, idx - 1, :], 2)) / batch_size / seq_num
                root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] -
                                                    root_rot_seq[:, idx - 1, :], 2)) / batch_size / seq_num
            root_pos_item = root_pos_item + root_pos_temp
            root_rot_item = root_rot_item + root_rot_temp
        loss = root_pos_item + root_rot_item
        return loss * 1.5
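
# Hypothetical smoke test (illustration only): the SimpleNamespace config and
# the tensor sizes below are invented stand-ins for this project's real config,
# chosen so that the slicing in KeyframeLoss and SmoothLoss lines up.
if __name__ == "__main__":
    import types

    config = types.SimpleNamespace(device="cpu", pos_dim=69, root_pos_dim=3,
                                   root_rot_dim=4, key_num=2)
    batch, frames, feat = 4, 6, 100  # feat > pos_dim + root_pos_dim + root_rot_dim
    predict_seq = torch.randn(batch, frames, feat)
    _train_x1 = torch.randn(batch, 1, feat)
    gt_seq = torch.randn(batch, frames, feat)

    print("keyframe loss:", KeyframeLoss(config)(predict_seq, _train_x1, gt_seq).item())
    print("smooth loss:  ", SmoothLoss(config)(predict_seq, _train_x1, gt_seq).item())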
root_rot_temp = MSE_loss(root_rot_seq[:, :1, :], init_root_rot[:])\n elif idx == seq_num - 1:\n root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos) + \\\n MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)\n root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot) + \\\n MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)\n else:\n root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] - root_pos_seq[:, idx - 1, :], 2)) \\\n / batch_size / seq_num\n root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] - root_rot_seq[:, idx - 1, :], 2)) \\\n / batch_size / seq_num\n # AddBackward0\n root_pos_item = root_pos_item + root_pos_temp\n root_rot_item = root_rot_item + root_rot_temp\n loss = root_pos_item + root_rot_item # DivBackward0\n return loss * 1.5\n",
"step-ids": [
14,
18,
19,
23,
24
]
}
|
[
14,
18,
19,
23,
24
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
red = range(4)
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
<|reserved_special_token_1|>
import turtle
red = range(4)
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
<|reserved_special_token_1|>
import turtle
red = range(4);
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
|
flexible
|
{
"blob_id": "38fceb57977cb792be1a63e8571cd222facdf656",
"index": 1142,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in red:\n turtle.forward(200)\n turtle.left(90)\nturtle.done()\n",
"step-3": "<mask token>\nred = range(4)\nfor i in red:\n turtle.forward(200)\n turtle.left(90)\nturtle.done()\n",
"step-4": "import turtle\nred = range(4)\nfor i in red:\n turtle.forward(200)\n turtle.left(90)\nturtle.done()\n",
"step-5": "import turtle\n\nred = range(4);\nfor i in red:\n\tturtle.forward(200)\n\tturtle.left(90)\n\nturtle.done()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Version(BaseVersion):
def __init__(self, connection):
super().__init__(connection)
def update(self, force=False):
"""
:param force:
:return:
"""
log.info('Updating to version %s' % DB_VERSION)
try:
log.info('Updating master_workflowstate...')
self.db.execute(
'ALTER TABLE master_workflowstate ADD reason TEXT NULL')
except (OperationalError, ProgrammingError):
pass
except Exception as e:
self.db.rollback()
log.exception(e)
raise Exception(e)
self.db.commit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Version(BaseVersion):
def __init__(self, connection):
super().__init__(connection)
def update(self, force=False):
"""
:param force:
:return:
"""
log.info('Updating to version %s' % DB_VERSION)
try:
log.info('Updating master_workflowstate...')
self.db.execute(
'ALTER TABLE master_workflowstate ADD reason TEXT NULL')
except (OperationalError, ProgrammingError):
pass
except Exception as e:
self.db.rollback()
log.exception(e)
raise Exception(e)
self.db.commit()
def downgrade(self, force=False):
"""Downgrade is not necessary as reason accepts NULL values"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DB_VERSION = 8
log = logging.getLogger(__name__)
class Version(BaseVersion):
def __init__(self, connection):
super().__init__(connection)
def update(self, force=False):
"""
:param force:
:return:
"""
log.info('Updating to version %s' % DB_VERSION)
try:
log.info('Updating master_workflowstate...')
self.db.execute(
'ALTER TABLE master_workflowstate ADD reason TEXT NULL')
except (OperationalError, ProgrammingError):
pass
except Exception as e:
self.db.rollback()
log.exception(e)
raise Exception(e)
self.db.commit()
def downgrade(self, force=False):
"""Downgrade is not necessary as reason accepts NULL values"""
<|reserved_special_token_1|>
import logging
from sqlalchemy.exc import *
from Pegasus.db.admin.admin_loader import *
from Pegasus.db.admin.versions.base_version import BaseVersion
from Pegasus.db.schema import *
DB_VERSION = 8
log = logging.getLogger(__name__)
class Version(BaseVersion):
def __init__(self, connection):
super().__init__(connection)
def update(self, force=False):
"""
:param force:
:return:
"""
log.info('Updating to version %s' % DB_VERSION)
try:
log.info('Updating master_workflowstate...')
self.db.execute(
'ALTER TABLE master_workflowstate ADD reason TEXT NULL')
except (OperationalError, ProgrammingError):
pass
except Exception as e:
self.db.rollback()
log.exception(e)
raise Exception(e)
self.db.commit()
def downgrade(self, force=False):
"""Downgrade is not necessary as reason accepts NULL values"""
<|reserved_special_token_1|>
#!/usr/bin/env python
#
# Copyright 2017-2021 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from sqlalchemy.exc import *
from Pegasus.db.admin.admin_loader import *
from Pegasus.db.admin.versions.base_version import BaseVersion
from Pegasus.db.schema import *
DB_VERSION = 8
log = logging.getLogger(__name__)
class Version(BaseVersion):
def __init__(self, connection):
super().__init__(connection)
def update(self, force=False):
"""
:param force:
:return:
"""
log.info("Updating to version %s" % DB_VERSION)
try:
log.info("Updating master_workflowstate...")
self.db.execute("ALTER TABLE master_workflowstate ADD reason TEXT NULL")
except (OperationalError, ProgrammingError):
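            # These errors most likely mean the 'reason' column already exists
            # (assumption based on the swallowed error types), so ignore them.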
pass
except Exception as e:
self.db.rollback()
log.exception(e)
raise Exception(e)
self.db.commit()
def downgrade(self, force=False):
"Downgrade is not necessary as reason accepts NULL values"
|
flexible
|
{
"blob_id": "12fd4e3bfb6821205a9b65b4d236b4158ec4ef1e",
"index": 7345,
"step-1": "<mask token>\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-3": "<mask token>\nDB_VERSION = 8\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-4": "import logging\nfrom sqlalchemy.exc import *\nfrom Pegasus.db.admin.admin_loader import *\nfrom Pegasus.db.admin.versions.base_version import BaseVersion\nfrom Pegasus.db.schema import *\nDB_VERSION = 8\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-5": "#!/usr/bin/env python\n#\n# Copyright 2017-2021 University Of Southern California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport logging\n\nfrom sqlalchemy.exc import *\n\nfrom Pegasus.db.admin.admin_loader import *\nfrom Pegasus.db.admin.versions.base_version import BaseVersion\nfrom Pegasus.db.schema import *\n\nDB_VERSION = 8\n\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info(\"Updating to version %s\" % DB_VERSION)\n try:\n log.info(\"Updating master_workflowstate...\")\n self.db.execute(\"ALTER TABLE master_workflowstate ADD reason TEXT NULL\")\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n\n self.db.commit()\n\n def downgrade(self, force=False):\n \"Downgrade is not necessary as reason accepts NULL values\"\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('users', '0002_customer_employee_lead_manager')]
operations = [migrations.CreateModel(name='Product', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('name', models.CharField(max_length=
255, unique=True)), ('lft', models.PositiveIntegerField(editable=
False)), ('rght', models.PositiveIntegerField(editable=False)), (
'tree_id', models.PositiveIntegerField(db_index=True, editable=
False)), ('level', models.PositiveIntegerField(editable=False)), (
'parent', mptt.fields.TreeForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.CASCADE, related_name=
'children', to='core.Product'))], options={'abstract': False}),
migrations.CreateModel(name='Ticket', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('name', models.CharField(max_length=255)), (
'description', models.CharField(max_length=255)), ('state', models.
CharField(max_length=255)), ('created', models.DateTimeField()), (
'product', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, related_name='tickets', to='core.Product'))]), migrations.
CreateModel(name='Task', fields=[('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('description', models.CharField(max_length=255)), ('state',
models.CharField(max_length=255)), ('estimated', models.
DateTimeField()), ('reported', models.DateTimeField()), ('employee',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='tasks', to='users.Employee'))]), migrations.
CreateModel(name='Comment', fields=[('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('text', models.TextField()), ('ticket', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name=
'comments', to='core.Ticket')), ('user', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name=
'comments', to=settings.AUTH_USER_MODEL))]), migrations.CreateModel
(name='Attachment', fields=[('id', models.AutoField(auto_created=
True, primary_key=True, serialize=False, verbose_name='ID')), (
'name', models.CharField(max_length=255)), ('file', models.
FileField(upload_to='')), ('ticket', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, related_name='attachments', to=
'core.Ticket'))])]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('users', '0002_customer_employee_lead_manager')]
operations = [migrations.CreateModel(name='Product', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('name', models.CharField(max_length=
255, unique=True)), ('lft', models.PositiveIntegerField(editable=
False)), ('rght', models.PositiveIntegerField(editable=False)), (
'tree_id', models.PositiveIntegerField(db_index=True, editable=
False)), ('level', models.PositiveIntegerField(editable=False)), (
'parent', mptt.fields.TreeForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.CASCADE, related_name=
'children', to='core.Product'))], options={'abstract': False}),
migrations.CreateModel(name='Ticket', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('name', models.CharField(max_length=255)), (
'description', models.CharField(max_length=255)), ('state', models.
CharField(max_length=255)), ('created', models.DateTimeField()), (
'product', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, related_name='tickets', to='core.Product'))]), migrations.
CreateModel(name='Task', fields=[('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('description', models.CharField(max_length=255)), ('state',
models.CharField(max_length=255)), ('estimated', models.
DateTimeField()), ('reported', models.DateTimeField()), ('employee',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='tasks', to='users.Employee'))]), migrations.
CreateModel(name='Comment', fields=[('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('text', models.TextField()), ('ticket', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name=
'comments', to='core.Ticket')), ('user', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name=
'comments', to=settings.AUTH_USER_MODEL))]), migrations.CreateModel
(name='Attachment', fields=[('id', models.AutoField(auto_created=
True, primary_key=True, serialize=False, verbose_name='ID')), (
'name', models.CharField(max_length=255)), ('file', models.
FileField(upload_to='')), ('ticket', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, related_name='attachments', to=
'core.Ticket'))])]
<|reserved_special_token_1|>
# Generated by Django 2.2.5 on 2019-10-09 12:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
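    # Initial schema: Product (an MPTT tree), Ticket, Task, Comment, and Attachment.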
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0002_customer_employee_lead_manager'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core.Product')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('state', models.CharField(max_length=255)),
('created', models.DateTimeField()),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='core.Product')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
('state', models.CharField(max_length=255)),
('estimated', models.DateTimeField()),
('reported', models.DateTimeField()),
('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='users.Employee')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='core.Ticket')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('file', models.FileField(upload_to='')),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='core.Ticket')),
],
),
]
|
flexible
|
{
"blob_id": "5485fe4f612ededc11e3a96dfd546e97a56cbe2a",
"index": 3316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('users', '0002_customer_employee_lead_manager')]\n operations = [migrations.CreateModel(name='Product', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 255, unique=True)), ('lft', models.PositiveIntegerField(editable=\n False)), ('rght', models.PositiveIntegerField(editable=False)), (\n 'tree_id', models.PositiveIntegerField(db_index=True, editable=\n False)), ('level', models.PositiveIntegerField(editable=False)), (\n 'parent', mptt.fields.TreeForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'children', to='core.Product'))], options={'abstract': False}),\n migrations.CreateModel(name='Ticket', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('name', models.CharField(max_length=255)), (\n 'description', models.CharField(max_length=255)), ('state', models.\n CharField(max_length=255)), ('created', models.DateTimeField()), (\n 'product', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='tickets', to='core.Product'))]), migrations.\n CreateModel(name='Task', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('description', models.CharField(max_length=255)), ('state',\n models.CharField(max_length=255)), ('estimated', models.\n DateTimeField()), ('reported', models.DateTimeField()), ('employee',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='tasks', to='users.Employee'))]), migrations.\n CreateModel(name='Comment', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('text', models.TextField()), ('ticket', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to='core.Ticket')), ('user', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to=settings.AUTH_USER_MODEL))]), migrations.CreateModel\n (name='Attachment', fields=[('id', models.AutoField(auto_created=\n True, primary_key=True, serialize=False, verbose_name='ID')), (\n 'name', models.CharField(max_length=255)), ('file', models.\n FileField(upload_to='')), ('ticket', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='attachments', to=\n 'core.Ticket'))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('users', '0002_customer_employee_lead_manager')]\n operations = [migrations.CreateModel(name='Product', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 255, unique=True)), ('lft', models.PositiveIntegerField(editable=\n False)), ('rght', models.PositiveIntegerField(editable=False)), (\n 'tree_id', models.PositiveIntegerField(db_index=True, editable=\n False)), ('level', models.PositiveIntegerField(editable=False)), (\n 'parent', mptt.fields.TreeForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'children', to='core.Product'))], options={'abstract': False}),\n migrations.CreateModel(name='Ticket', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('name', models.CharField(max_length=255)), (\n 'description', models.CharField(max_length=255)), ('state', models.\n CharField(max_length=255)), ('created', models.DateTimeField()), (\n 'product', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='tickets', to='core.Product'))]), migrations.\n CreateModel(name='Task', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('description', models.CharField(max_length=255)), ('state',\n models.CharField(max_length=255)), ('estimated', models.\n DateTimeField()), ('reported', models.DateTimeField()), ('employee',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='tasks', to='users.Employee'))]), migrations.\n CreateModel(name='Comment', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('text', models.TextField()), ('ticket', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to='core.Ticket')), ('user', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'comments', to=settings.AUTH_USER_MODEL))]), migrations.CreateModel\n (name='Attachment', fields=[('id', models.AutoField(auto_created=\n True, primary_key=True, serialize=False, verbose_name='ID')), (\n 'name', models.CharField(max_length=255)), ('file', models.\n FileField(upload_to='')), ('ticket', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='attachments', to=\n 'core.Ticket'))])]\n",
"step-5": "# Generated by Django 2.2.5 on 2019-10-09 12:06\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('users', '0002_customer_employee_lead_manager'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255, unique=True)),\n ('lft', models.PositiveIntegerField(editable=False)),\n ('rght', models.PositiveIntegerField(editable=False)),\n ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),\n ('level', models.PositiveIntegerField(editable=False)),\n ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core.Product')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Ticket',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('description', models.CharField(max_length=255)),\n ('state', models.CharField(max_length=255)),\n ('created', models.DateTimeField()),\n ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='core.Product')),\n ],\n ),\n migrations.CreateModel(\n name='Task',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('description', models.CharField(max_length=255)),\n ('state', models.CharField(max_length=255)),\n ('estimated', models.DateTimeField()),\n ('reported', models.DateTimeField()),\n ('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='users.Employee')),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('text', models.TextField()),\n ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='core.Ticket')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Attachment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('file', models.FileField(upload_to='')),\n ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='core.Ticket')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import sys
import requests
import urllib.parse
import urllib.request
import json
from shutil import copyfile
def sdssDownload(band, location, size, path):
"""
.
sdssArchie populates a directory with links to raw images
from the SDSS mission. These images are all in FITS format
and suitable for reprojection, moaicking, etc.
Parameters
----------
band: str
SDSS wavelength band (e.g. "g").
location: str
Coordinates or name of an astronomical object
(e.g. "4h23m11s -12d14m32.3s", "Messier 017").
size: float
Region size in degrees.
path: str
Directory for output files.
"""
debug = 0
# Build the URL to get image metadata
url = "http://montage.ipac.caltech.edu/cgi-bin/ArchiveList/nph-archivelist?survey=SDSSDR7+" \
+ urllib.parse.quote_plus(band) \
+ "&location=" \
+ urllib.parse.quote_plus(location) \
+ "&size=" \
+ str(size) + "&units=deg&mode=JSON"
if debug:
print('DEBUG> url = "' + url + '"')
# Retrieve the image metadata and convert
# the JSON to a Python dictionary
fjson = urllib.request.urlopen(url)
data = json.load(fjson)
if debug:
print("DEBUG> data: ")
print(data)
nimages = len(data)
if debug:
print("DEBUG> nimages = " + str(nimages))
    # We need to check the given directory:
    # whether it exists, whether it is writeable,
    # etc. We'll do it by trying to create it,
    # then trying to write the image data to it.
rtn = {}
try:
if not os.path.exists(path):
os.makedirs(path)
except:
rtn['status'] = 1
rtn['msg' ] = 'Cannot create output directory.'
return rtn
# Retrieve all the images into the data directory
try:
for index in range(0,nimages):
datafile = path + "/" + data[index]['file']
url = data[index]['url']
archivefile = url
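            # Rewrite the public DAS URL into its local mirror path so the
            # file can be copied rather than downloaded over HTTP.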
archivefile = archivefile.replace('http://das.sdss.org','/home/idies/workspace/sdss_das/das2')
if debug:
print('copy file ' + archivefile + ' to ' + datafile)
copyfile(archivefile, datafile)
except:
rtn['status'] = 1
rtn['msg' ] = 'Error reading or writing data'
return rtn
# Success
rtn['status'] = 0
rtn['count' ] = nimages
return rtn
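
# Example usage (hypothetical arguments):
#   result = sdssDownload('g', 'Messier 017', 0.5, 'data')
#   if result['status'] == 0:
#       print('retrieved', result['count'], 'images')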
|
normal
|
{
"blob_id": "459bd36037158c9a6a38da6eadf45a3dc6f19e04",
"index": 4405,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sdssDownload(band, location, size, path):\n \"\"\"\n .\n sdssArchie populates a directory with links to raw images \n from the SDSS mission. These images are all in FITS format \n and suitable for reprojection, moaicking, etc.\n\n Parameters\n ----------\n band: str\n SDSS wavelength band (e.g. \"g\").\n\n location: str\n Coordinates or name of an astronomical object\n (e.g. \"4h23m11s -12d14m32.3s\", \"Messier 017\").\n\n size: float\n Region size in degrees.\n\n path: str\n Directory for output files.\n \"\"\"\n debug = 0\n url = (\n 'http://montage.ipac.caltech.edu/cgi-bin/ArchiveList/nph-archivelist?survey=SDSSDR7+'\n + urllib.parse.quote_plus(band) + '&location=' + urllib.parse.\n quote_plus(location) + '&size=' + str(size) + '&units=deg&mode=JSON')\n if debug:\n print('DEBUG> url = \"' + url + '\"')\n fjson = urllib.request.urlopen(url)\n data = json.load(fjson)\n if debug:\n print('DEBUG> data: ')\n print(data)\n nimages = len(data)\n if debug:\n print('DEBUG> nimages = ' + str(nimages))\n rtn = {}\n try:\n if not os.path.exists(path):\n os.makedirs(path)\n except:\n rtn['status'] = 1\n rtn['msg'] = 'Cannot create output directory.'\n return rtn\n try:\n for index in range(0, nimages):\n datafile = path + '/' + data[index]['file']\n url = data[index]['url']\n archivefile = url\n archivefile = archivefile.replace('http://das.sdss.org',\n '/home/idies/workspace/sdss_das/das2')\n if debug:\n print('copy file ' + archivefile + ' to ' + datafile)\n copyfile(archivefile, datafile)\n except:\n rtn['status'] = 1\n rtn['msg'] = 'Error reading or writing data'\n return rtn\n rtn['status'] = 0\n rtn['count'] = nimages\n return rtn\n",
"step-3": "import os\nimport sys\nimport requests\nimport urllib.parse\nimport urllib.request\nimport json\nfrom shutil import copyfile\n\n\ndef sdssDownload(band, location, size, path):\n \"\"\"\n .\n sdssArchie populates a directory with links to raw images \n from the SDSS mission. These images are all in FITS format \n and suitable for reprojection, moaicking, etc.\n\n Parameters\n ----------\n band: str\n SDSS wavelength band (e.g. \"g\").\n\n location: str\n Coordinates or name of an astronomical object\n (e.g. \"4h23m11s -12d14m32.3s\", \"Messier 017\").\n\n size: float\n Region size in degrees.\n\n path: str\n Directory for output files.\n \"\"\"\n debug = 0\n url = (\n 'http://montage.ipac.caltech.edu/cgi-bin/ArchiveList/nph-archivelist?survey=SDSSDR7+'\n + urllib.parse.quote_plus(band) + '&location=' + urllib.parse.\n quote_plus(location) + '&size=' + str(size) + '&units=deg&mode=JSON')\n if debug:\n print('DEBUG> url = \"' + url + '\"')\n fjson = urllib.request.urlopen(url)\n data = json.load(fjson)\n if debug:\n print('DEBUG> data: ')\n print(data)\n nimages = len(data)\n if debug:\n print('DEBUG> nimages = ' + str(nimages))\n rtn = {}\n try:\n if not os.path.exists(path):\n os.makedirs(path)\n except:\n rtn['status'] = 1\n rtn['msg'] = 'Cannot create output directory.'\n return rtn\n try:\n for index in range(0, nimages):\n datafile = path + '/' + data[index]['file']\n url = data[index]['url']\n archivefile = url\n archivefile = archivefile.replace('http://das.sdss.org',\n '/home/idies/workspace/sdss_das/das2')\n if debug:\n print('copy file ' + archivefile + ' to ' + datafile)\n copyfile(archivefile, datafile)\n except:\n rtn['status'] = 1\n rtn['msg'] = 'Error reading or writing data'\n return rtn\n rtn['status'] = 0\n rtn['count'] = nimages\n return rtn\n",
"step-4": "import os\nimport sys\nimport requests\nimport urllib.parse\nimport urllib.request\nimport json\n\nfrom shutil import copyfile\n\ndef sdssDownload(band, location, size, path):\n\n \"\"\"\n .\n sdssArchie populates a directory with links to raw images \n from the SDSS mission. These images are all in FITS format \n and suitable for reprojection, moaicking, etc.\n\n Parameters\n ----------\n band: str\n SDSS wavelength band (e.g. \"g\").\n\n location: str\n Coordinates or name of an astronomical object\n (e.g. \"4h23m11s -12d14m32.3s\", \"Messier 017\").\n\n size: float\n Region size in degrees.\n\n path: str\n Directory for output files.\n \"\"\"\n\n debug = 0\n\n \n # Build the URL to get image metadata\n \n url = \"http://montage.ipac.caltech.edu/cgi-bin/ArchiveList/nph-archivelist?survey=SDSSDR7+\" \\\n + urllib.parse.quote_plus(band) \\\n + \"&location=\" \\\n + urllib.parse.quote_plus(location) \\\n + \"&size=\" \\\n + str(size) + \"&units=deg&mode=JSON\"\n \n if debug:\n print('DEBUG> url = \"' + url + '\"')\n \n \n # Retrieve the image metadata and convert\n # the JSON to a Python dictionary\n \n fjson = urllib.request.urlopen(url)\n \n data = json.load(fjson)\n \n if debug:\n print(\"DEBUG> data: \")\n print(data)\n \n nimages = len(data)\n \n if debug:\n print(\"DEBUG> nimages = \" + str(nimages))\n \n \n # We need to check the given directory, \n # whether it exists, whether it is writeable,\n # etc. We'll do it by trying to create it,\n # then trying to write the image data it.\n \n rtn = {} \n \n try:\n \n if not os.path.exists(path):\n os.makedirs(path)\n \n except:\n rtn['status'] = 1\n rtn['msg' ] = 'Cannot create output directory.'\n return rtn \n \n \n # Retrieve all the images into the data directory\n\n try:\n for index in range(0,nimages):\n \n datafile = path + \"/\" + data[index]['file']\n url = data[index]['url']\n archivefile = url\n archivefile = archivefile.replace('http://das.sdss.org','/home/idies/workspace/sdss_das/das2')\n\n if debug:\n print('copy file ' + archivefile + ' to ' + datafile)\n\n copyfile(archivefile, datafile)\n\n except:\n \n rtn['status'] = 1\n rtn['msg' ] = 'Error reading or writing data'\n return rtn\n \n \n # Success\n \n rtn['status'] = 0\n rtn['count' ] = nimages\n return rtn\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .most_serializers import *
|
normal
|
{
"blob_id": "a718949ed95b7d78f091b1e0f237eed151b102ae",
"index": 2160,
"step-1": "<mask token>\n",
"step-2": "from .most_serializers import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/python3
import requests
import urllib3
urllib3.disable_warnings()
response = requests.get('https://freeaeskey.xyz', verify=False)
data = response.text.encode('utf-8')
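# Extract the text between the first <b>...</b> pair, which appears to be where the page publishes the key.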
key = data[data.index(b'<b>')+3:data.index(b'</b>')]
print(key.decode('ascii'))
|
normal
|
{
"blob_id": "368e209f83cc0cade81791c8357e01e7e3f940c8",
"index": 97,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurllib3.disable_warnings()\n<mask token>\nprint(key.decode('ascii'))\n",
"step-3": "<mask token>\nurllib3.disable_warnings()\nresponse = requests.get('https://freeaeskey.xyz', verify=False)\ndata = response.text.encode('utf-8')\nkey = data[data.index(b'<b>') + 3:data.index(b'</b>')]\nprint(key.decode('ascii'))\n",
"step-4": "import requests\nimport urllib3\nurllib3.disable_warnings()\nresponse = requests.get('https://freeaeskey.xyz', verify=False)\ndata = response.text.encode('utf-8')\nkey = data[data.index(b'<b>') + 3:data.index(b'</b>')]\nprint(key.decode('ascii'))\n",
"step-5": "#!/usr/bin/python3\n\nimport requests\nimport urllib3\nurllib3.disable_warnings()\nresponse = requests.get('https://freeaeskey.xyz', verify=False)\ndata = response.text.encode('utf-8')\nkey = data[data.index(b'<b>')+3:data.index(b'</b>')]\nprint(key.decode('ascii'))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (C) 2020 Francis Sun, all rights reserved.
"""A copyright utility"""
import datetime
import argparse
import os
import os.path
class Copyright:
_file_type = {
'c/c++': ['h', 'c', 'cpp', 'cc'],
'python': ['py'],
'cmake': ['cmake'],
'vim': ['vim'],
'shell': ['sh']
}
_declaration = "Copyright (C) {0} {1}, all rights reserved."
_formaters = {}
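    # Each _*_formater method below is registered for its file extensions in
    # _formaters at class-definition time by the for loop that follows it.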
def __init__(self, file_path, author):
self.file_path = file_path
self.author = author
file_name = self.file_path.split(os.path.sep)[-1]
if file_name == 'CMakeLists.txt':
self.file_type = 'cmake'
elif file_name == 'vimrc':
self.file_type = 'vim'
else:
self.file_type = self.file_path.split('.')[-1]
self.declaration = Copyright._declaration.format(
datetime.date.today().year, self.author)
def _c_cpp_formater(self):
return "/* " + self.declaration + " */"
for ft in _file_type['c/c++']:
_formaters[ft] = _c_cpp_formater
def _py_formater(self):
return "# " + self.declaration
for ft in _file_type['python']:
_formaters[ft] = _py_formater
def _cmake_formater(self):
return "# " + self.declaration
for ft in _file_type['cmake']:
_formaters[ft] = _cmake_formater
def _vim_formater(self):
return "\" " + self.declaration
for ft in _file_type['vim']:
_formaters[ft] = _vim_formater
def _shell_formater(self):
return "# " + self.declaration
for ft in _file_type['shell']:
_formaters[ft] = _shell_formater
def get_declaration(self):
if self.file_type in Copyright._formaters:
return Copyright._formaters[self.file_type](self)
tmp_filename_suffix = ".fjcu"
def Write(self):
tmp_filename = self.file_path + Copyright.tmp_filename_suffix
with open(tmp_filename, 'w') as tmp_f:
origin_content = ""
if os.path.isfile(self.file_path):
with open(self.file_path, 'r') as origin_f:
origin_content = origin_f.read()
tmp_f.write(self.get_declaration() + "\n" + origin_content)
os.replace(tmp_filename, self.file_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('file_path')
parser.add_argument('author')
opt = parser.parse_args()
cr = Copyright(opt.file_path, opt.author)
cr.Write()
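    # Example (hypothetical filename/author): running
    #   python copyright.py src/main.cpp "Jane Doe"
    # prepends "/* Copyright (C) <current year> Jane Doe, all rights reserved. */" to the file.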
|
normal
|
{
"blob_id": "dc05a441c21a67fbb3a1975b3fccb865a32731c8",
"index": 4642,
"step-1": "<mask token>\n\n\nclass Copyright:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n <mask token>\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n <mask token>\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n <mask token>\n <mask token>\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Copyright:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n <mask token>\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n <mask token>\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Copyright:\n _file_type = {'c/c++': ['h', 'c', 'cpp', 'cc'], 'python': ['py'],\n 'cmake': ['cmake'], 'vim': ['vim'], 'shell': ['sh']}\n _declaration = 'Copyright (C) {0} {1}, all rights reserved.'\n _formaters = {}\n\n def __init__(self, file_path, author):\n self.file_path = file_path\n self.author = author\n file_name = self.file_path.split(os.path.sep)[-1]\n if file_name == 'CMakeLists.txt':\n self.file_type = 'cmake'\n elif file_name == 'vimrc':\n self.file_type = 'vim'\n else:\n self.file_type = self.file_path.split('.')[-1]\n self.declaration = Copyright._declaration.format(datetime.date.\n today().year, self.author)\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n\n def _vim_formater(self):\n return '\" ' + self.declaration\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n tmp_filename_suffix = '.fjcu'\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file_path')\n parser.add_argument('author')\n opt = parser.parse_args()\n cr = Copyright(opt.file_path, opt.author)\n cr.Write()\n",
"step-4": "<mask token>\nimport datetime\nimport argparse\nimport os\nimport os.path\n\n\nclass Copyright:\n _file_type = {'c/c++': ['h', 'c', 'cpp', 'cc'], 'python': ['py'],\n 'cmake': ['cmake'], 'vim': ['vim'], 'shell': ['sh']}\n _declaration = 'Copyright (C) {0} {1}, all rights reserved.'\n _formaters = {}\n\n def __init__(self, file_path, author):\n self.file_path = file_path\n self.author = author\n file_name = self.file_path.split(os.path.sep)[-1]\n if file_name == 'CMakeLists.txt':\n self.file_type = 'cmake'\n elif file_name == 'vimrc':\n self.file_type = 'vim'\n else:\n self.file_type = self.file_path.split('.')[-1]\n self.declaration = Copyright._declaration.format(datetime.date.\n today().year, self.author)\n\n def _c_cpp_formater(self):\n return '/* ' + self.declaration + ' */'\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n\n def _vim_formater(self):\n return '\" ' + self.declaration\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return '# ' + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n tmp_filename_suffix = '.fjcu'\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = ''\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + '\\n' + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file_path')\n parser.add_argument('author')\n opt = parser.parse_args()\n cr = Copyright(opt.file_path, opt.author)\n cr.Write()\n",
"step-5": "# Copyright (C) 2020 Francis Sun, all rights reserved.\n\n\"\"\"A copyright utility\"\"\"\n\nimport datetime\nimport argparse\nimport os\nimport os.path\n\n\nclass Copyright:\n _file_type = {\n 'c/c++': ['h', 'c', 'cpp', 'cc'],\n 'python': ['py'],\n 'cmake': ['cmake'],\n 'vim': ['vim'],\n 'shell': ['sh']\n }\n _declaration = \"Copyright (C) {0} {1}, all rights reserved.\"\n _formaters = {}\n\n def __init__(self, file_path, author):\n self.file_path = file_path\n self.author = author\n file_name = self.file_path.split(os.path.sep)[-1]\n\n if file_name == 'CMakeLists.txt':\n self.file_type = 'cmake'\n elif file_name == 'vimrc':\n self.file_type = 'vim'\n else:\n self.file_type = self.file_path.split('.')[-1]\n\n self.declaration = Copyright._declaration.format(\n datetime.date.today().year, self.author)\n\n def _c_cpp_formater(self):\n return \"/* \" + self.declaration + \" */\"\n for ft in _file_type['c/c++']:\n _formaters[ft] = _c_cpp_formater\n\n def _py_formater(self):\n return \"# \" + self.declaration\n for ft in _file_type['python']:\n _formaters[ft] = _py_formater\n\n def _cmake_formater(self):\n return \"# \" + self.declaration\n for ft in _file_type['cmake']:\n _formaters[ft] = _cmake_formater\n\n def _vim_formater(self):\n return \"\\\" \" + self.declaration\n for ft in _file_type['vim']:\n _formaters[ft] = _vim_formater\n\n def _shell_formater(self):\n return \"# \" + self.declaration\n for ft in _file_type['shell']:\n _formaters[ft] = _shell_formater\n\n def get_declaration(self):\n if self.file_type in Copyright._formaters:\n return Copyright._formaters[self.file_type](self)\n\n tmp_filename_suffix = \".fjcu\"\n\n def Write(self):\n tmp_filename = self.file_path + Copyright.tmp_filename_suffix\n with open(tmp_filename, 'w') as tmp_f:\n origin_content = \"\"\n if os.path.isfile(self.file_path):\n with open(self.file_path, 'r') as origin_f:\n origin_content = origin_f.read()\n tmp_f.write(self.get_declaration() + \"\\n\" + origin_content)\n os.replace(tmp_filename, self.file_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file_path')\n parser.add_argument('author')\n opt = parser.parse_args()\n cr = Copyright(opt.file_path, opt.author)\n cr.Write()\n",
"step-ids": [
5,
7,
11,
12,
13
]
}
|
[
5,
7,
11,
12,
13
] |
import os
import pandas as pd
from tabulate import tabulate
if __name__ == '__main__':
bestPrecision = [0,0,0,0,0,0]
bestPrecisionFile = ['','','','','','']
bestRecall = [0,0,0,0,0,0]
bestRecallFile = ['','','','','','']
bestSupport = [0,0,0,0,0,0]
bestSupportFile = ['','','','','','']
bestF1_Score = [0,0,0,0,0,0]
bestF1_ScoreFile = ['','','','','','']
bestPrecisionOverall = 0
bestPrecisionOverallFile = ''
bestRecallOverall = 0
bestRecallOverallFile = ''
bestSupportOverall = 0
bestSupportOverallFile = ''
bestF1_ScoreOverall = 0
bestF1_ScoreOverallFile = ''
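    # Scan every per-model CSV in results/, tracking the best value of each
    # metric per class and the best weighted-average value overall, together
    # with the file that achieved each.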
for file in os.listdir("results"):
        # Weighted overall score: (0.359*a)+(0.256*b)+(0.205*c)+(0.087*d)+(0.073*e)+(0.016*f),
        # one fixed weight per class (applied in the loop below).
df = pd.read_csv("results/"+file)
for i in range(0,6):
if bestF1_Score[i] < df["f1_score"][i]:
bestF1_Score[i] = df["f1_score"][i]
bestF1_ScoreFile[i]=file
if bestPrecision[i] < df["precision"][i]:
bestPrecision[i] = df["precision"][i]
bestPrecisionFile[i] = file
if bestRecall[i] < df["recall"][i]:
bestRecall[i] = df["recall"][i]
bestRecallFile[i] = file
if bestSupport[i] < df["support"][i]:
bestSupport[i] = df["support"][i]
bestSupportFile[i] = file
currPrecision = 0
currRecall = 0
currSupport = 0
currF1_Score = 0
for idx,value in enumerate([0.359,0.256,0.205,0.087,0.073,0.016]):
currF1_Score += (value * df["f1_score"][idx])
currPrecision += (value * df["precision"][idx])
currRecall += (value * df["recall"][idx])
currSupport += (value * df["support"][idx])
if currPrecision > bestPrecisionOverall:
bestPrecisionOverall=currPrecision
bestPrecisionOverallFile = file
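            # Debug trace of the running best weighted precision: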
print(file)
print(bestPrecisionOverall)
if currRecall > bestRecallOverall:
bestRecallOverall=currRecall
bestRecallOverallFile = file
if currSupport > bestSupportOverall:
bestSupportOverall=currSupport
bestSupportOverallFile = file
if currF1_Score > bestF1_ScoreOverall:
bestF1_ScoreOverall=currF1_Score
bestF1_ScoreOverallFile = file
bestPrecision.insert(0,"Precision")
bestPrecisionFile.insert(0, "Precision")
bestRecall.insert(0, "Recall")
bestRecallFile.insert(0, "Recall")
bestSupport.insert(0, "Support")
bestSupportFile.insert(0, "Support")
bestF1_Score.insert(0, "F1_SCORE")
bestF1_ScoreFile.insert(0, "F1_SCORE")
    # Rows alternate: best per-class values, then the files they came from
    tableSpecific = [["", "Class0", "Class1", "Class2", "Class3", "Class4", "Class5"],
                     bestPrecision, bestPrecisionFile, bestRecall, bestRecallFile,
                     bestSupport, bestSupportFile, bestF1_Score, bestF1_ScoreFile]
tableGeneral = [ ["Precision Best","Recall Best","Support Best","F1_Score Best"],
[bestPrecisionOverall,bestRecallOverall,bestSupportOverall,bestF1_ScoreOverall],
[bestPrecisionOverallFile,bestRecallOverallFile,bestSupportOverallFile,bestF1_ScoreOverallFile]]
print(tabulate(tableSpecific))
print(tabulate(tableGeneral))
|
normal
|
{
"blob_id": "22c498d84f40455d89ed32ccf3bf8778cb159579",
"index": 79,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n bestPrecision = [0, 0, 0, 0, 0, 0]\n bestPrecisionFile = ['', '', '', '', '', '']\n bestRecall = [0, 0, 0, 0, 0, 0]\n bestRecallFile = ['', '', '', '', '', '']\n bestSupport = [0, 0, 0, 0, 0, 0]\n bestSupportFile = ['', '', '', '', '', '']\n bestF1_Score = [0, 0, 0, 0, 0, 0]\n bestF1_ScoreFile = ['', '', '', '', '', '']\n bestPrecisionOverall = 0\n bestPrecisionOverallFile = ''\n bestRecallOverall = 0\n bestRecallOverallFile = ''\n bestSupportOverall = 0\n bestSupportOverallFile = ''\n bestF1_ScoreOverall = 0\n bestF1_ScoreOverallFile = ''\n for file in os.listdir('results'):\n df = pd.read_csv('results/' + file)\n for i in range(0, 6):\n if bestF1_Score[i] < df['f1_score'][i]:\n bestF1_Score[i] = df['f1_score'][i]\n bestF1_ScoreFile[i] = file\n if bestPrecision[i] < df['precision'][i]:\n bestPrecision[i] = df['precision'][i]\n bestPrecisionFile[i] = file\n if bestRecall[i] < df['recall'][i]:\n bestRecall[i] = df['recall'][i]\n bestRecallFile[i] = file\n if bestSupport[i] < df['support'][i]:\n bestSupport[i] = df['support'][i]\n bestSupportFile[i] = file\n currPrecision = 0\n currRecall = 0\n currSupport = 0\n currF1_Score = 0\n for idx, value in enumerate([0.359, 0.256, 0.205, 0.087, 0.073, 0.016]\n ):\n currF1_Score += value * df['f1_score'][idx]\n currPrecision += value * df['precision'][idx]\n currRecall += value * df['recall'][idx]\n currSupport += value * df['support'][idx]\n if currPrecision > bestPrecisionOverall:\n bestPrecisionOverall = currPrecision\n bestPrecisionOverallFile = file\n print(file)\n print(bestPrecisionOverall)\n if currRecall > bestRecallOverall:\n bestRecallOverall = currRecall\n bestRecallOverallFile = file\n if currSupport > bestSupportOverall:\n bestSupportOverall = currSupport\n bestSupportOverallFile = file\n if currF1_Score > bestF1_ScoreOverall:\n bestF1_ScoreOverall = currF1_Score\n bestF1_ScoreOverallFile = file\n bestPrecision.insert(0, 'Precision')\n bestPrecisionFile.insert(0, 'Precision')\n bestRecall.insert(0, 'Recall')\n bestRecallFile.insert(0, 'Recall')\n bestSupport.insert(0, 'Support')\n bestSupportFile.insert(0, 'Support')\n bestF1_Score.insert(0, 'F1_SCORE')\n bestF1_ScoreFile.insert(0, 'F1_SCORE')\n tableSpecific = [['', 'Class0', 'Class1', 'Class2', 'Class3', 'Class4',\n 'Class5'], bestPrecision, bestPrecisionFile, bestRecall,\n bestRecallFile, bestSupport, bestSupportFile, bestF1_Score,\n bestF1_ScoreFile]\n tableGeneral = [['Precision Best', 'Recall Best', 'Support Best',\n 'F1_Score Best'], [bestPrecisionOverall, bestRecallOverall,\n bestSupportOverall, bestF1_ScoreOverall], [bestPrecisionOverallFile,\n bestRecallOverallFile, bestSupportOverallFile, bestF1_ScoreOverallFile]\n ]\n print(tabulate(tableSpecific))\n print(tabulate(tableGeneral))\n",
"step-3": "import os\nimport pandas as pd\nfrom tabulate import tabulate\nif __name__ == '__main__':\n bestPrecision = [0, 0, 0, 0, 0, 0]\n bestPrecisionFile = ['', '', '', '', '', '']\n bestRecall = [0, 0, 0, 0, 0, 0]\n bestRecallFile = ['', '', '', '', '', '']\n bestSupport = [0, 0, 0, 0, 0, 0]\n bestSupportFile = ['', '', '', '', '', '']\n bestF1_Score = [0, 0, 0, 0, 0, 0]\n bestF1_ScoreFile = ['', '', '', '', '', '']\n bestPrecisionOverall = 0\n bestPrecisionOverallFile = ''\n bestRecallOverall = 0\n bestRecallOverallFile = ''\n bestSupportOverall = 0\n bestSupportOverallFile = ''\n bestF1_ScoreOverall = 0\n bestF1_ScoreOverallFile = ''\n for file in os.listdir('results'):\n df = pd.read_csv('results/' + file)\n for i in range(0, 6):\n if bestF1_Score[i] < df['f1_score'][i]:\n bestF1_Score[i] = df['f1_score'][i]\n bestF1_ScoreFile[i] = file\n if bestPrecision[i] < df['precision'][i]:\n bestPrecision[i] = df['precision'][i]\n bestPrecisionFile[i] = file\n if bestRecall[i] < df['recall'][i]:\n bestRecall[i] = df['recall'][i]\n bestRecallFile[i] = file\n if bestSupport[i] < df['support'][i]:\n bestSupport[i] = df['support'][i]\n bestSupportFile[i] = file\n currPrecision = 0\n currRecall = 0\n currSupport = 0\n currF1_Score = 0\n for idx, value in enumerate([0.359, 0.256, 0.205, 0.087, 0.073, 0.016]\n ):\n currF1_Score += value * df['f1_score'][idx]\n currPrecision += value * df['precision'][idx]\n currRecall += value * df['recall'][idx]\n currSupport += value * df['support'][idx]\n if currPrecision > bestPrecisionOverall:\n bestPrecisionOverall = currPrecision\n bestPrecisionOverallFile = file\n print(file)\n print(bestPrecisionOverall)\n if currRecall > bestRecallOverall:\n bestRecallOverall = currRecall\n bestRecallOverallFile = file\n if currSupport > bestSupportOverall:\n bestSupportOverall = currSupport\n bestSupportOverallFile = file\n if currF1_Score > bestF1_ScoreOverall:\n bestF1_ScoreOverall = currF1_Score\n bestF1_ScoreOverallFile = file\n bestPrecision.insert(0, 'Precision')\n bestPrecisionFile.insert(0, 'Precision')\n bestRecall.insert(0, 'Recall')\n bestRecallFile.insert(0, 'Recall')\n bestSupport.insert(0, 'Support')\n bestSupportFile.insert(0, 'Support')\n bestF1_Score.insert(0, 'F1_SCORE')\n bestF1_ScoreFile.insert(0, 'F1_SCORE')\n tableSpecific = [['', 'Class0', 'Class1', 'Class2', 'Class3', 'Class4',\n 'Class5'], bestPrecision, bestPrecisionFile, bestRecall,\n bestRecallFile, bestSupport, bestSupportFile, bestF1_Score,\n bestF1_ScoreFile]\n tableGeneral = [['Precision Best', 'Recall Best', 'Support Best',\n 'F1_Score Best'], [bestPrecisionOverall, bestRecallOverall,\n bestSupportOverall, bestF1_ScoreOverall], [bestPrecisionOverallFile,\n bestRecallOverallFile, bestSupportOverallFile, bestF1_ScoreOverallFile]\n ]\n print(tabulate(tableSpecific))\n print(tabulate(tableGeneral))\n",
"step-4": "import os\nimport pandas as pd\nfrom tabulate import tabulate\n\nif __name__ == '__main__':\n\n bestPrecision = [0,0,0,0,0,0]\n bestPrecisionFile = ['','','','','','']\n bestRecall = [0,0,0,0,0,0]\n bestRecallFile = ['','','','','','']\n bestSupport = [0,0,0,0,0,0]\n bestSupportFile = ['','','','','','']\n bestF1_Score = [0,0,0,0,0,0]\n bestF1_ScoreFile = ['','','','','','']\n\n bestPrecisionOverall = 0\n bestPrecisionOverallFile = ''\n bestRecallOverall = 0\n bestRecallOverallFile = ''\n bestSupportOverall = 0\n bestSupportOverallFile = ''\n bestF1_ScoreOverall = 0\n bestF1_ScoreOverallFile = ''\n\n for file in os.listdir(\"results\"):\n\n # (0.359*a)+(0.256*b)+(0.205*c)+(0.087*d)+(0.073*e)+(0.016*f)\n df = pd.read_csv(\"results/\"+file)\n\n for i in range(0,6):\n if bestF1_Score[i] < df[\"f1_score\"][i]:\n bestF1_Score[i] = df[\"f1_score\"][i]\n bestF1_ScoreFile[i]=file\n if bestPrecision[i] < df[\"precision\"][i]:\n bestPrecision[i] = df[\"precision\"][i]\n bestPrecisionFile[i] = file\n if bestRecall[i] < df[\"recall\"][i]:\n bestRecall[i] = df[\"recall\"][i]\n bestRecallFile[i] = file\n if bestSupport[i] < df[\"support\"][i]:\n bestSupport[i] = df[\"support\"][i]\n bestSupportFile[i] = file\n\n currPrecision = 0\n currRecall = 0\n currSupport = 0\n currF1_Score = 0\n\n for idx,value in enumerate([0.359,0.256,0.205,0.087,0.073,0.016]):\n currF1_Score += (value * df[\"f1_score\"][idx])\n currPrecision += (value * df[\"precision\"][idx])\n currRecall += (value * df[\"recall\"][idx])\n currSupport += (value * df[\"support\"][idx])\n\n if currPrecision > bestPrecisionOverall:\n bestPrecisionOverall=currPrecision\n bestPrecisionOverallFile = file\n print(file)\n print(bestPrecisionOverall)\n if currRecall > bestRecallOverall:\n bestRecallOverall=currRecall\n bestRecallOverallFile = file\n if currSupport > bestSupportOverall:\n bestSupportOverall=currSupport\n bestSupportOverallFile = file\n if currF1_Score > bestF1_ScoreOverall:\n bestF1_ScoreOverall=currF1_Score\n bestF1_ScoreOverallFile = file\n\n bestPrecision.insert(0,\"Precision\")\n bestPrecisionFile.insert(0, \"Precision\")\n bestRecall.insert(0, \"Recall\")\n bestRecallFile.insert(0, \"Recall\")\n bestSupport.insert(0, \"Support\")\n bestSupportFile.insert(0, \"Support\")\n bestF1_Score.insert(0, \"F1_SCORE\")\n bestF1_ScoreFile.insert(0, \"F1_SCORE\")\n\n tableSpecific = [[\"\",\"Class0\",\"Class1\",\"Class2\",\"Class3\",\"Class4\",\"Class5\"],\n bestPrecision,bestPrecisionFile,bestRecall,bestRecallFile,\n bestSupport,bestSupportFile,bestF1_Score,bestF1_ScoreFile]\n\n tableGeneral = [ [\"Precision Best\",\"Recall Best\",\"Support Best\",\"F1_Score Best\"],\n [bestPrecisionOverall,bestRecallOverall,bestSupportOverall,bestF1_ScoreOverall],\n [bestPrecisionOverallFile,bestRecallOverallFile,bestSupportOverallFile,bestF1_ScoreOverallFile]]\n\n print(tabulate(tableSpecific))\n print(tabulate(tableGeneral))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
#****************************************************************************
# fieldformat.py, provides non-GUI base classes for field formatting
#
# TreeLine, an information storage program
# Copyright (C) 2006, Douglas W. Bell
#
# This is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License, either Version 2 or any later
# version. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY. See the included LICENSE file for details.
#****************************************************************************
import re
from xml.sax.saxutils import escape, unescape
from gennumber import GenNumber, GenNumberError
from gendate import GenDate, GenDateError
from gentime import GenTime, GenTimeError
from genboolean import GenBoolean, GenBooleanError
import treedoc
import globalref
_errorStr = '#####'
def xslEscape(text):
"""Encapsulate all literal text in <xsl:text> elements
and transform/escape some non-XML entities.
       For the moment, only &nbsp; is supported"""
nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)')
escDict = {'&nbsp;': ' '} # escape function does '&' first
def esc(matchObj):
"""Return escaped replacement text"""
if matchObj.group(1) == None: # no tags found
return u'<xsl:text>%s</xsl:text>' % \
escape(matchObj.group(3), escDict)
if matchObj.group(1): # leading text and tag
return u'<xsl:text>%s</xsl:text>%s' % \
(escape(matchObj.group(1), escDict), matchObj.group(2))
return matchObj.group(2) # tag only
return nonTagRe.sub(esc, text)
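# Illustrative sketch (not part of the original module): xslEscape wraps each
# literal run in <xsl:text> and passes tags through untouched, e.g.
#   xslEscape(u'pre <b>mid</b> post')
# yields, roughly,
#   <xsl:text>pre </xsl:text><b><xsl:text>mid</xsl:text></b><xsl:text> post</xsl:text>
# (plus a harmless empty <xsl:text></xsl:text> from the final zero-width match).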
class TextFormat(object):
"""Holds format info for a normal text field"""
typeName = 'Text'
sortSequence = 20
stripTagRe = re.compile('<.*?>')
defaultNumLines = 1
#field format edit options:
defaultFormat = ''
formatMenuList = []
htmlOption = True
hasEditChoices = False
autoAddChoices = False
hasFileBrowse = False
allowAltLinkText = False
def __init__(self, name, attrs={}):
"""Any prefix, suffix, html info in attrs dict"""
self.name = name
self.enName = '' # used only by fileFormat field for i18n
self.format = attrs.get(u'format', self.defaultFormat)
self.prefix = attrs.get(u'prefix', '')
self.suffix = attrs.get(u'suffix', '')
# defaults to no html (line breaks preserved)
self.html = attrs.get(u'html', '').startswith('y') and True or False
self.isRequired = attrs.get(u'required', '').startswith('y') and \
True or False
self.hidden = attrs.get(u'hidden', '').startswith('y') and \
True or False
try:
self.numLines = int(attrs.get(u'lines',
repr(self.defaultNumLines)))
except ValueError:
self.numLines = 1
self.initDefault = attrs.get(u'init', '')
self.linkAltField = attrs.get(u'linkalt', '')
self.parentLevel = 0
self.useFileInfo = False
self.showInDialog = True
self.initFormat()
def initFormat(self):
"""Called by base init, after class change or format text change"""
pass
def duplicateSettings(self, otherField):
"""Assign other field's parameters to this field"""
self.name = otherField.name
self.enName = otherField.enName
self.format = otherField.format
self.prefix = otherField.prefix
self.suffix = otherField.suffix
self.html = otherField.html
self.isRequired = otherField.isRequired
self.hidden = otherField.hidden
self.numLines = otherField.numLines
self.initDefault = otherField.initDefault
self.linkAltField = otherField.linkAltField
self.parentLevel = otherField.parentLevel
self.useFileInfo = otherField.useFileInfo
self.showInDialog = otherField.showInDialog
def changeType(self, newType):
"""Change this field's type to newType with default format"""
self.__class__ = globals()[newType + 'Format']
self.format = self.defaultFormat
self.initFormat()
def englishName(self):
"""Returns English name if assigned, o/w name"""
if self.enName:
return self.enName
return self.name
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
if not self.useFileInfo:
return u'{*%s*}' % name
return u'{*!%s*}' % name
def labelName(self):
"""Return name used for labels - add * for required fields"""
if self.isRequired:
return '%s*' % self.name
return self.name
def writeXml(self):
"""Return text for xml attributes"""
text = u' type="%s"' % self.typeName
if self.format:
text += u' format="%s"' % escape(self.format, treedoc.escDict)
if self.prefix:
text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict)
if self.suffix:
text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict)
if self.html:
text += u' html="y"'
if self.isRequired:
text += u' required="y"'
if self.hidden:
text += u' hidden="y"'
if self.numLines > 1:
text += u' lines="%d"' % self.numLines
if self.initDefault:
text += u' init="%s"' % escape(self.initDefault, treedoc.escDict)
if self.linkAltField:
text += u' linkalt="%s"' % escape(self.linkAltField,
treedoc.escDict)
return text
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, internal)
return ''
def removeMarkup(self, text):
"""Remove HTML Markup and unescape entities"""
text = TextFormat.stripTagRe.sub('', text)
return unescape(text)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
prefix = self.prefix
suffix = self.suffix
if titleMode:
if self.html:
storedText = self.removeMarkup(storedText)
if globalref.docRef.formHtml:
prefix = self.removeMarkup(prefix)
suffix = self.removeMarkup(suffix)
else:
if not self.html:
storedText = escape(storedText).replace('\n', '<br />')
if not globalref.docRef.formHtml:
prefix = escape(prefix)
suffix = escape(suffix)
return u'%s%s%s' % (prefix, storedText, suffix)
def editText(self, item):
"""Return tuple of this field's text in edit format and bool validity,
using edit format option"""
storedText = item.data.get(self.name, '')
result = self.formatEditText(storedText)
if self.isRequired and not result[0]:
return (result[0], False)
return result
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return (storedText, True)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
return (editText, editText or not self.isRequired)
def getInitDefault(self):
"""Return initial stored value for new nodes"""
return self.initDefault
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
self.initDefault = self.storedText(editText)[0]
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
return self.formatEditText(self.initDefault)[0]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return []
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
return storedText.lower()
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
return value
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:if test="normalize-space(./%s)">%s'\
'<xsl:value-of select="./%s"/>%s</xsl:if>' % \
(self.name, xslEscape(self.prefix), self.name,
xslEscape(self.suffix))
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(./%s)' % self.name
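# Quick illustrative checks for the TextFormat helpers above:
#   f = TextFormat(u'notes')
#   f.sepName()                          -> u'{*notes*}'
#   f.removeMarkup(u'<b>5 &amp; 6</b>')  -> u'5 & 6'
# sepName switches to the u'{*!notes*}' form once useFileInfo is set.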
class LongTextFormat(TextFormat):
"""Holds format info for a long text field - Obsolete -
       kept for compatibility with old files"""
# typeName = 'LongText'
defaultNumLines = 7
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
class NumberFormat(TextFormat):
"""Holds format info for a number field"""
typeName = 'Number'
sortSequence = 10
#field format edit options:
defaultFormat = u'#.##'
formatMenuList = [(u'%s\t%s' % (_('Optional Digit'), '#'), '#'),
(u'%s\t%s' % (_('Required Digit'), '0'), '0'),
(u'%s\t%s' % (_('Digit or Space (external)'),
_('<space>')), ' '),
None,
(u'%s\t%s' % (_('Decimal Point'), '.'), '.'),
(u'%s\t%s' % (_('Decimal Comma'), ','), ','),
None,
(u'%s\t%s' % (_('Comma Separator'), '\,'), '\,'),
(u'%s\t%s' % (_('Dot Separator'), '\.'), '\.'),
(u'%s\t%s' % (_('Space Separator (internal)'),
_('<space>')), ' '),
None,
(u'%s\t%s' % (_('Optional Sign'), '-'), '-'),
(u'%s\t%s' % (_('Required Sign'), '+'), '+'),
None,
(u'%s\t%s' % (_('Exponent (capital)'), 'E'), 'E'),
(u'%s\t%s' % (_('Exponent (small)'), 'e'), 'e')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenNumber(storedText).numStr(self.format)
except GenNumberError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using self.format"""
try:
return (GenNumber(storedText).numStr(self.format), True)
except GenNumberError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using self.format"""
try:
return (repr(GenNumber().setFromStr(editText, self.format)), True)
except GenNumberError:
return (editText, not editText and not self.isRequired)
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return GenNumber(storedText).num
except GenNumberError:
return ''
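# NumberFormat delegates the real work to GenNumber; assuming its usual
# pattern rules ('#' optional digit, '0' required digit, per the menu above),
# the default u'#.##' format would render a stored 3.14159 as roughly '3.14'.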
class ChoiceFormat(TextFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'Choice'
sortSequence = 20
editSep = '/'
#field format edit options:
defaultFormat = '1/2/3/4'
formatMenuList = [(u'%s\t%s' % (_('Separator'), '/'), '/'), None,
(u'%s\t%s' % (_('"/" Character'), '//'), '//'), None,
(u'%s\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = self.splitText(self.format)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return (storedText, True)
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText in self.formatList:
return (editText, True)
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
return [(text, '') for text in self.formatList]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [text for text in self.formatList]
def splitText(self, textStr):
"""Split textStr using editSep, double sep's become char"""
return [text.strip().replace('\0', self.editSep) for text in
textStr.replace(self.editSep * 2, '\0').
split(self.editSep)]
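# splitText is plain string logic and easy to verify (illustrative):
#   ChoiceFormat(u'c').splitText(u'red/green//blue')
#   -> [u'red', u'green/blue']   (a doubled separator escapes to a literal '/')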
class CombinationFormat(ChoiceFormat):
"""Holds format info for a field of combinations of text options"""
typeName = 'Combination'
outputSepList = (',', ';', ':', '|', '/', '\\', '~')
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
ChoiceFormat.initFormat(self)
fullFormat = ''.join(self.formatList)
try:
self.sep = [sep for sep in CombinationFormat.outputSepList
if sep not in fullFormat][0] + ' '
except IndexError:
self.sep = CombinationFormat.outputSepList[0] + ' '
def sortedChoices(self, inText):
"""Return tuple of choices from inText sorted like format and
True if all splits are valid and included"""
choices = self.splitText(inText)
sortedChoices = [text for text in self.formatList if text in choices]
if len(choices) == len(sortedChoices):
return (sortedChoices, True)
else:
return (sortedChoices, False)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
choices, valid = self.sortedChoices(storedText)
if valid:
result = self.sep.join(choices)
else:
result = _errorStr
return TextFormat.formatOutput(self, result, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
for choice in self.splitText(storedText):
if choice not in self.formatList:
return (storedText, not storedText)
return (storedText, True)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
choices, valid = self.sortedChoices(editText)
if valid:
return (self.editSep.join(choices), True)
else:
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
currentChoices, valid = self.sortedChoices(currentText)
nonChoices = [text for text in self.formatList
if text not in currentChoices]
results = []
for choice in nonChoices: # menu entries to add a choice
allChoices = currentChoices + [choice]
allChoices = [text for text in self.formatList
if text in allChoices]
results.append((self.editSep.join(allChoices),
'(%s %s)' % (_('add'), choice)))
if currentChoices:
results.append((None, None)) # separator
for choice in currentChoices: # menu entries to remove a choice
allChoices = currentChoices[:]
allChoices.remove(choice)
allChoices = [text for text in self.formatList
if text in allChoices]
results.append((self.editSep.join(allChoices),
'(%s %s)' % (_('remove'), choice)))
return results
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [entry[0] for entry in self.getEditChoices()]
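# sortedChoices both orders and validates a stored combination; with the
# inherited default format u'1/2/3/4' (illustrative):
#   combo = CombinationFormat(u'c')
#   combo.sortedChoices(u'3/1')  -> ([u'1', u'3'], True)
#   combo.sortedChoices(u'3/9')  -> ([u'3'], False)   # '9' is not a choice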
class AutoChoiceFormat(ChoiceFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'AutoChoice'
#field format edit options:
defaultFormat = ''
formatMenuList = ()
hasEditChoices = True
autoAddChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = []
def addChoice(self, choice, sort=False):
"""Add choice to edit menu list if not already there"""
if choice and choice not in self.formatList:
self.formatList.append(choice)
if sort:
self.sortChoices()
def sortChoices(self):
"""Sort menu list choices"""
self.formatList.sort()
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return (storedText, True)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText:
return (editText, True)
return (editText, not self.isRequired)
class DateFormat(TextFormat):
"""Holds format info for a date field"""
typeName = 'Date'
sortSequence = 5
#field format edit options:
defaultFormat = u'mmmm d, yyyy'
dateStampStrings = ('Now', _('Now', 'date stamp setting'))
formatMenuList = [(u'%s\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'),
(u'%s\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'),
None,
(u'%s\t%s' % (_('Month (1 or 2 digits)'), 'm'), 'm'),
(u'%s\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'),
(u'%s\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'),
(u'%s\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'),
None,
(u'%s\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'),
(u'%s\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),
None,
(u'%s\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'),
(u'%s\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'),
(u'%s\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenDate(storedText).dateStr(self.format)
except GenDateError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return (GenDate(storedText).dateStr(format), True)
except GenDateError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return (repr(GenDate().setFromStr(editText, format)), True)
except GenDateError:
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
format = globalref.options.strData('EditDateFormat', True)
today = GenDate().dateStr(format)
yesterday = (GenDate() - 1).dateStr(format)
tomorrow = (GenDate() + 1).dateStr(format)
return [(today, '(%s)' % _('today')),
(yesterday, '(%s)' % _('yesterday')),
(tomorrow, '(%s)' % _('tomorrow'))]
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in DateFormat.dateStampStrings:
return GenDate().dateStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in DateFormat.dateStampStrings:
self.initDefault = DateFormat.dateStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in DateFormat.dateStampStrings:
return DateFormat.dateStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, DateFormat.dateStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenDate())
return value
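# For conditionals, DateFormat resolves the literal prefix 'now' at compare
# time (illustrative):
#   DateFormat(u'd').adjustedCompareValue(u'now')    -> repr(GenDate()), today
#   DateFormat(u'd').adjustedCompareValue(u'other')  -> u'other', unchanged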
class TimeFormat(TextFormat):
"""Holds format info for a time field"""
typeName = 'Time'
sortSequence = 6
#field format edit options:
defaultFormat = u'h:MM:SS aa'
timeStampStrings = ('Now', _('Now', 'time stamp setting'))
formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),
'H'),
(u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'),
(u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'),
'h'),
(u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'),
None,
(u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'),
(u'%s\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'),
None,
(u'%s\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'),
(u'%s\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'),
(u'%s\t%s' % (_('Fractional Seconds'), 's'), 's'),
None,
(u'%s\t%s' % (_('AM/PM'), 'AA'), 'AA'),
(u'%s\t%s' % (_('am/pm'), 'aa'),'aa')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenTime(storedText).timeStr(self.format)
except GenTimeError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditTimeFormat', True)
try:
return (GenTime(storedText).timeStr(format), True)
except GenTimeError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return (repr(GenTime(editText)), True)
except GenTimeError:
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and annotated text"""
format = globalref.options.strData('EditTimeFormat', True)
now = GenTime().timeStr(format)
choices = [(now, '(%s)' % _('now'))]
for hr in (6, 9, 12, 15, 18, 21, 0):
time = GenTime((hr, 0)).timeStr(format)
choices.append((time, ''))
return choices
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in TimeFormat.timeStampStrings:
return GenTime().timeStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in TimeFormat.timeStampStrings:
self.initDefault = TimeFormat.timeStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in TimeFormat.timeStampStrings:
return TimeFormat.timeStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, TimeFormat.timeStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenTime())
return value
class BooleanFormat(ChoiceFormat):
"""Holds format info for a bool field"""
typeName = 'Boolean'
sortSequence = 1
#field format edit options:
defaultFormat = _('yes/no')
formatMenuList = [(_('true/false'), _('true/false')),
(_('T/F'), _('T/F')), None,
(_('yes/no'), _('yes/no')),
(_('Y/N'), _('Y/N')), None,
('1/0', '1/0')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
try:
storedText = GenBoolean(storedText).boolStr(self.format)
except GenBooleanError:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return (storedText, True)
try:
return (GenBoolean(storedText).boolStr(self.format), True)
except GenBooleanError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return (repr(GenBoolean(editText)), True)
except GenBooleanError:
if editText in self.formatList:
return (editText, True)
return (editText, not editText and not self.isRequired)
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return repr(GenBoolean(storedText))
except GenBooleanError:
return ''
class UniqueIDFormat(TextFormat):
"""An unique ID automatically generated for new nodes"""
typeName = 'UniqueID'
sortSequence = 10
formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')
#field format edit options:
defaultFormat = u'0001'
formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None,
(u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'),
(u'%s\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def nextValue(self, increment=True):
"""Return the next value for a new node,
increment format if increment is True"""
try:
prefix, numText, suffix = UniqueIDFormat.formatRe.\
match(self.format).groups()
except AttributeError:
self.format = UniqueIDFormat.defaultFormat
return self.nextValue(increment)
value = self.format
if increment:
pattern = u'%%s%%0.%dd%%s' % len(numText)
num = int(numText) + 1
self.format = pattern % (prefix, num, suffix)
return value
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return int(UniqueIDFormat.formatRe.match(storedText).group(2))
except AttributeError:
return 0
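# nextValue hands out the current ID and bumps the numeric run in place,
# preserving prefix, zero padding and suffix (illustrative):
#   uid = UniqueIDFormat(u'id', {u'format': u'id0100'})
#   uid.nextValue()  -> u'id0100', and uid.format becomes u'id0101'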
class URLFormat(TextFormat):
"""Holds format info for a field with a URL path"""
typeName = 'URL'
sortSequence = 8
htmlOption = False
allowAltLinkText = True
hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')
URLMethod = u'http://'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
altText = ''
if self.linkAltField:
field = item.nodeFormat().findField(self.linkAltField)
if field:
altText = field.outputText(item, titleMode, internal)
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, altText, internal)
return ''
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict),
altText or url)
results.append(TextFormat.formatOutput(self, path, titleMode,
internal))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:for-each select = "./%s">%s<xsl:choose>'\
'<xsl:when test="contains(., \':\')"><a href="{.}">'\
'<xsl:value-of select="."/></a></xsl:when><xsl:otherwise>'\
'<a href="%s{.}"><xsl:value-of select="."/></a>'\
'</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \
(self.name, xslEscape(self.prefix), self.URLMethod,
xslEscape(self.suffix))
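# URLFormat leaves addresses that already carry a scheme alone and prefixes
# bare ones with its URLMethod (illustrative, ignoring prefix/suffix/altText):
#   u'www.example.com' -> <a href="http://www.example.com">www.example.com</a>
#   u'ftp://host/file' -> href kept as-is; hasMethodRe matches the 'ftp:' scheme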
class PathFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Path'
URLMethod = u'file:///'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class EmailFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Email'
URLMethod = u'mailto:'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class InternalLinkFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'InternalLink'
URLMethod = u'#'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class ExecuteLinkFormat(URLFormat):
"""Holds format info for an executable field"""
typeName = 'ExecuteLink'
URLMethod = u'exec:'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode or not internal:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
# add prefix/suffix within the executable path:
url = TextFormat.formatOutput(self, url, titleMode, internal)
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
results.append(u'<a href="%s">%s</a>' %
(escape(path, treedoc.escDict), altText or url))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return TextFormat.xslText(self)
class PictureFormat(TextFormat):
"""Holds format info for a field with a link to a picture"""
typeName = 'Picture'
sortSequence = 8
htmlOption = False
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped and with
a link to the picture if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = ['<img src="%s">' % escape(url, treedoc.escDict) for url
in paths]
return u'<br />'.join(results)
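# PictureFormat emits one <img> per stored line (illustrative; src values are
# passed through escape() with treedoc.escDict first):
#   stored u'a.png\nb.png' -> u'<img src="a.png"><br /><img src="b.png">'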
class ParentFormat(TextFormat):
"""Placeholder format for references to specific parents"""
typeName = 'Parent'
def __init__(self, name, parentLevel=1):
TextFormat.__init__(self, name, {})
self.parentLevel = parentLevel
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
return u'{*%s%s*}' % (self.parentLevel * '*', name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
for num in range(self.parentLevel):
item = item.parent
if not item:
return ''
field = item.nodeFormat().findField(self.name)
if not field:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',
self.name)
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)
class AncestorFormat(TextFormat):
"""Placeholder format for references to any parent with data"""
typeName = 'Ancestor'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = 1000
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*?%s*}' % (name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
field = None
while not field:
item = item.parent
if item:
field = item.nodeFormat().findField(self.name)
else:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(ancestor::*/%s)' % self.name
class ChildFormat(TextFormat):
"""Placeholder format for references to a sequence of child data"""
typeName = 'Child'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = -1
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*&%s*}' % (name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
result = []
for child in item.childList:
field = child.nodeFormat().findField(self.name)
if field:
text = field.outputText(child, titleMode, internal)
if text:
result.append(text)
return globalref.docRef.childFieldSep.join(result)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="child::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(child::*/%s)' % self.name
class CountFormat(TextFormat):
"""Placeholder format for a count of children at the given level"""
typeName = 'Count'
def __init__(self, name, level):
TextFormat.__init__(self, name, {})
self.parentLevel = -level
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*#%s*}' % (name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
return repr(len(item.descendLevelList(-self.parentLevel)))
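# Each placeholder class above marks its references with a distinct sepName
# glyph (illustrative):
#   ParentFormat(u'n', 2).sepName()  -> u'{***n*}'
#   AncestorFormat(u'n').sepName()   -> u'{*?n*}'
#   ChildFormat(u'n').sepName()      -> u'{*&n*}'
#   CountFormat(u'n', 1).sepName()   -> u'{*#n*}'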
|
normal
|
{
"blob_id": "5e1398ed628917a42cc465e7cc2979601f0f4fbc",
"index": 7865,
"step-1": "<mask token>\n\n\nclass DateFormat(TextFormat):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 
'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not 
in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link 
reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in 
titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n",
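The URL-oriented classes in the sample above (URLFormat and its PathFormat, EmailFormat, InternalLinkFormat and ExecuteLinkFormat subclasses) all hinge on one small check: a stored value only gets the class's URLMethod prefix when it does not already carry a scheme or a '#' anchor, as tested by hasMethodRe. Below is a minimal standalone sketch of that logic, reusing the exact regex from the sample; the helper name withMethod is illustrative and not part of the original code.

import re

# Same pattern as URLFormat.hasMethodRe: two or more letters followed
# by ':' (a scheme such as 'http:' or 'mailto:'), or a leading '#'.
hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')

def withMethod(path, urlMethod=u'http://'):
    """Prepend urlMethod only when path lacks a scheme or anchor."""
    if hasMethodRe.match(path):
        return path
    return u'%s%s' % (urlMethod, path)

assert withMethod(u'www.example.com') == u'http://www.example.com'
assert withMethod(u'mailto:a@b.com') == u'mailto:a@b.com'
assert withMethod(u'readme.txt', u'file:///') == u'file:///readme.txt'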
"step-2": "<mask token>\n\n\nclass TextFormat(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n <mask token>\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML 
Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), 
'\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, 
'\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n 
def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return 
TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return 
TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n 
except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n 
\"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n",
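The "step-2" variant above masks parts of TextFormat but leaves ChoiceFormat.splitText intact, and that method carries the one non-obvious convention shared by the choice types: a doubled separator ('//' when editSep is '/') stands for a literal '/' inside a single choice, implemented by swapping the doubled form to a NUL placeholder before splitting and restoring it afterwards. A short self-contained sketch of that round trip, lifted out of the class purely for illustration:

editSep = '/'

def splitText(textStr):
    """Split textStr on editSep; doubled separators become the char."""
    return [text.strip().replace('\x00', editSep)
            for text in textStr.replace(editSep * 2, '\x00').split(editSep)]

assert splitText(u'1/2/3/4') == [u'1', u'2', u'3', u'4']
assert splitText(u'red//blue/green') == [u'red/blue', u'green']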
"step-3": "<mask token>\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for 
this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit 
or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init 
default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def 
__init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), 
(yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 
0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n 
TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field 
with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, 
englishOnly=False):\n        \"\"\"Return name enclosed with {*? *} separators\"\"\"\n        name = englishOnly and self.enName or self.name\n        return u'{*?%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        \"\"\"Return formatted text for this field\"\"\"\n        field = None\n        while not field:\n            item = item.parent\n            if item:\n                field = item.nodeFormat().findField(self.name)\n            else:\n                return ''\n        return field.outputText(item, titleMode, internal)\n\n    def xslText(self):\n        \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n        return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n    def xslTestText(self):\n        \"\"\"Return XSL file test for data existence\"\"\"\n        return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n    \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n    typeName = 'Child'\n\n    def __init__(self, name):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -1\n\n    def sepName(self, englishOnly=False):\n        \"\"\"Return name enclosed with {*& *} separators\"\"\"\n        name = englishOnly and self.enName or self.name\n        return u'{*&%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        \"\"\"Return formatted text for this field\"\"\"\n        result = []\n        for child in item.childList:\n            field = child.nodeFormat().findField(self.name)\n            if field:\n                text = field.outputText(child, titleMode, internal)\n                if text:\n                    result.append(text)\n        return globalref.docRef.childFieldSep.join(result)\n\n    def xslText(self):\n        \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n        return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n    def xslTestText(self):\n        \"\"\"Return XSL file test for data existence\"\"\"\n        return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n    \"\"\"Placeholder format for a count of children at the given level\"\"\"\n    typeName = 'Count'\n\n    def __init__(self, name, level):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -level\n\n    def sepName(self, englishOnly=False):\n        \"\"\"Return name enclosed with {*# *} separators\"\"\"\n        name = englishOnly and self.enName or self.name\n        return u'{*#%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        \"\"\"Return formatted text for this field\"\"\"\n        return repr(len(item.descendLevelList(-self.parentLevel)))\n",
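The DateFormat and TimeFormat classes in this record round-trip each value through three forms: the stored repr() used for sorting, the edit text governed by the EditDateFormat/EditTimeFormat options, and the display text driven by self.format. A minimal sketch of that flow, assuming TreeLine's gendate module is importable and globalref.options is configured; the field name u'Start' and the option value u'yyyy-mm-dd' are illustrative, not from the source:

# Hedged sketch, not part of the original fieldformat.py.
# Assumes globalref.options.strData('EditDateFormat', True) returns u'yyyy-mm-dd'.
field = DateFormat(u'Start', {u'format': u'mmmm d, yyyy'})
stored, valid = field.storedText(u'2006-10-15')   # -> (repr(GenDate(...)), True)
if valid:
    shown = field.formatOutput(stored, False)     # -> u'October 15, 2006'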
"step-4": "<mask token>\n_errorStr = '#####'\n\n\ndef xslEscape(text):\n \"\"\"Encapsulate all literal text in <xsl:text> elements\n and transform/escape some non-XML entities.\n For the moment, only is supported\"\"\"\n nonTagRe = re.compile('(.*?)(<.*?>)|(.*)')\n escDict = {'&nbsp;': ' '}\n\n def esc(matchObj):\n \"\"\"Return escaped replacement text\"\"\"\n if matchObj.group(1) == None:\n return u'<xsl:text>%s</xsl:text>' % escape(matchObj.group(3),\n escDict)\n if matchObj.group(1):\n return u'<xsl:text>%s</xsl:text>%s' % (escape(matchObj.group(1),\n escDict), matchObj.group(2))\n return matchObj.group(2)\n return nonTagRe.sub(esc, text)\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % 
escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % 
self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in 
self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = 
currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool 
validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not 
storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n 
return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a 
local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, 
internal)\n\n    def xslText(self):\n        \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n        return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n            self.name)\n\n    def xslTestText(self):\n        \"\"\"Return XSL file test for data existence\"\"\"\n        return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n    \"\"\"Placeholder format for references to any parent with data\"\"\"\n    typeName = 'Ancestor'\n\n    def __init__(self, name):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = 1000\n\n    def sepName(self, englishOnly=False):\n        \"\"\"Return name enclosed with {*? *} separators\"\"\"\n        name = englishOnly and self.enName or self.name\n        return u'{*?%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        \"\"\"Return formatted text for this field\"\"\"\n        field = None\n        while not field:\n            item = item.parent\n            if item:\n                field = item.nodeFormat().findField(self.name)\n            else:\n                return ''\n        return field.outputText(item, titleMode, internal)\n\n    def xslText(self):\n        \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n        return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n    def xslTestText(self):\n        \"\"\"Return XSL file test for data existence\"\"\"\n        return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n    \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n    typeName = 'Child'\n\n    def __init__(self, name):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -1\n\n    def sepName(self, englishOnly=False):\n        \"\"\"Return name enclosed with {*& *} separators\"\"\"\n        name = englishOnly and self.enName or self.name\n        return u'{*&%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        \"\"\"Return formatted text for this field\"\"\"\n        result = []\n        for child in item.childList:\n            field = child.nodeFormat().findField(self.name)\n            if field:\n                text = field.outputText(child, titleMode, internal)\n                if text:\n                    result.append(text)\n        return globalref.docRef.childFieldSep.join(result)\n\n    def xslText(self):\n        \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n        return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n    def xslTestText(self):\n        \"\"\"Return XSL file test for data existence\"\"\"\n        return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n    \"\"\"Placeholder format for a count of children at the given level\"\"\"\n    typeName = 'Count'\n\n    def __init__(self, name, level):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -level\n\n    def sepName(self, englishOnly=False):\n        \"\"\"Return name enclosed with {*# *} separators\"\"\"\n        name = englishOnly and self.enName or self.name\n        return u'{*#%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        \"\"\"Return formatted text for this field\"\"\"\n        return repr(len(item.descendLevelList(-self.parentLevel)))\n",
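UniqueIDFormat, shown above, keeps its counter inside the format string itself: nextValue() returns the current pattern and, when increment is True, rewrites self.format with the numeric part bumped and zero-padded to its original width. A brief sketch of that behavior; the field name u'ID' and the start pattern are illustrative, not from the source:

# Hedged sketch, not part of the original fieldformat.py.
# formatRe splits u'id0099' into ('id', '0099', ''), so padding is preserved.
uid = UniqueIDFormat(u'ID', {u'format': u'id0099'})
first = uid.nextValue()    # returns u'id0099'; uid.format is now u'id0100'
second = uid.nextValue()   # returns u'id0100'; uid.format is now u'id0101'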
"step-5": "#!/usr/bin/env python\n\n#****************************************************************************\n# fieldformat.py, provides non-GUI base classes for field formating\n#\n# TreeLine, an information storage program\n# Copyright (C) 2006, Douglas W. Bell\n#\n# This is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License, either Version 2 or any later\n# version. This program is distributed in the hope that it will be useful,\n# but WITTHOUT ANY WARRANTY. See the included LICENSE file for details.\n#****************************************************************************\n\nimport re\nfrom xml.sax.saxutils import escape, unescape\nfrom gennumber import GenNumber, GenNumberError\nfrom gendate import GenDate, GenDateError\nfrom gentime import GenTime, GenTimeError\nfrom genboolean import GenBoolean, GenBooleanError\nimport treedoc\nimport globalref\n\n_errorStr = '#####'\n\n\ndef xslEscape(text):\n \"\"\"Encapsulate all literal text in <xsl:text> elements\n and transform/escape some non-XML entities.\n For the moment, only is supported\"\"\"\n nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)')\n escDict = {'&nbsp;': ' '} # escape function does '&' first\n def esc(matchObj):\n \"\"\"Return escaped replacement text\"\"\"\n if matchObj.group(1) == None: # no tags found\n return u'<xsl:text>%s</xsl:text>' % \\\n escape(matchObj.group(3), escDict)\n if matchObj.group(1): # leading text and tag\n return u'<xsl:text>%s</xsl:text>%s' % \\\n (escape(matchObj.group(1), escDict), matchObj.group(2))\n return matchObj.group(2) # tag only\n return nonTagRe.sub(esc, text)\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n #field format edit options:\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = '' # used only by fileFormat field for i18n\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n # defaults to no html (line breaks preserved)\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y') and \\\n True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y') and \\\n True or False\n try:\n self.numLines = int(attrs.get(u'lines',\n repr(self.defaultNumLines)))\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n 
self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField,\n treedoc.escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return (editText, editText or not self.isRequired)\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial 
value from editor version using edit format option\"\"\"\n        self.initDefault = self.storedText(editText)[0]\n\n    def getEditInitDefault(self):\n        \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n        return self.formatEditText(self.initDefault)[0]\n\n    def initDefaultChoices(self):\n        \"\"\"Return a list of choices for setting the init default\"\"\"\n        return []\n\n    def sortValue(self, data):\n        \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n        storedText = data.get(self.name, '')\n        return storedText.lower()\n\n    def adjustedCompareValue(self, value):\n        \"\"\"Return conditional comparison value with real-time adjustments,\n           used for date and time types' 'now' value\"\"\"\n        return value\n\n    def xslText(self):\n        \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n        return u'<xsl:if test=\"normalize-space(./%s)\">%s'\\\n               '<xsl:value-of select=\"./%s\"/>%s</xsl:if>' % \\\n               (self.name, xslEscape(self.prefix), self.name,\n                xslEscape(self.suffix))\n\n    def xslTestText(self):\n        \"\"\"Return XSL file test for data existence\"\"\"\n        return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n    \"\"\"Holds format info for a long text field - Obsolete -\n       kept for compatibility with old files\"\"\"\n    # typeName = 'LongText'\n    defaultNumLines = 7\n    def __init__(self, name, attrs={}):\n        \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n        TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n    \"\"\"Holds format info for a number field\"\"\"\n    typeName = 'Number'\n    sortSequence = 10\n    #field format edit options:\n    defaultFormat = u'#.##'\n    formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'),\n                      (u'%s\\t%s' % (_('Required Digit'), '0'), '0'),\n                      (u'%s\\t%s' % (_('Digit or Space (external)'),\n                                    _('<space>')), ' '),\n                      None,\n                      (u'%s\\t%s' % (_('Decimal Point'), '.'), '.'),\n                      (u'%s\\t%s' % (_('Decimal Comma'), ','), ','),\n                      None,\n                      (u'%s\\t%s' % (_('Comma Separator'), '\\,'), '\\,'),\n                      (u'%s\\t%s' % (_('Dot Separator'), '\\.'), '\\.'),\n                      (u'%s\\t%s' % (_('Space Separator (internal)'),\n                                    _('<space>')), ' '),\n                      None,\n                      (u'%s\\t%s' % (_('Optional Sign'), '-'), '-'),\n                      (u'%s\\t%s' % (_('Required Sign'), '+'), '+'),\n                      None,\n                      (u'%s\\t%s' % (_('Exponent (capital)'), 'E'), 'E'),\n                      (u'%s\\t%s' % (_('Exponent (small)'), 'e'), 'e')]\n\n    def __init__(self, name, attrs={}):\n        \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n        TextFormat.__init__(self, name, attrs)\n\n    def formatOutput(self, storedText, titleMode, internal=False):\n        \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n        try:\n            text = GenNumber(storedText).numStr(self.format)\n        except GenNumberError:\n            text = _errorStr\n        return TextFormat.formatOutput(self, text, titleMode, internal)\n\n    def formatEditText(self, storedText):\n        \"\"\"Return tuple of text in edit format and bool validity,\n           using self.format\"\"\"\n        try:\n            return (GenNumber(storedText).numStr(self.format), True)\n        except GenNumberError:\n            return (storedText, not storedText)\n\n    def storedText(self, editText):\n        \"\"\"Return tuple of stored text from edited text and bool validity,\n           using self.format\"\"\"\n        try:\n            return (repr(GenNumber().setFromStr(editText, self.format)), True)\n        except GenNumberError:\n            return (editText, not editText and not self.isRequired)\n\n    def sortValue(self, data):\n        \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n        storedText = data.get(self.name, '')\n        try:\n            return 
GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n #field format edit options:\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None,\n (u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None,\n (u'%s\\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\0', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\0').\n split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = (',', ';', ':', '|', '/', '\\\\', '~')\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList\n if sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, 
titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n #field format edit options:\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n #field format edit options:\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = ('Now', _('Now', 'date stamp setting'))\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'),\n (u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'),\n None,\n (u'%s\\t%s' % 
(_('Month (1 or 2 digits)'), 'm'), 'm'),\n (u'%s\\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'),\n (u'%s\\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'),\n (u'%s\\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'),\n None,\n (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'),\n (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None,\n (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'),\n (u'%s\\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'),\n (u'%s\\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n #field format edit options:\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = ('Now', _('Now', 'time stamp setting'))\n 
formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'),\n (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'),\n (u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'),\n 'h'),\n (u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'),\n None,\n (u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'),\n (u'%s\\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'),\n None,\n (u'%s\\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'),\n (u'%s\\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'),\n (u'%s\\t%s' % (_('Fractional Seconds'), 's'), 's'),\n None,\n (u'%s\\t%s' % (_('AM/PM'), 'AA'), 'AA'),\n (u'%s\\t%s' % (_('am/pm'), 'aa'),'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n #field format edit options:\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), 
_('true/false')),\n (_('T/F'), _('T/F')), None,\n (_('yes/no'), _('yes/no')),\n (_('Y/N'), _('Y/N')), None,\n ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n #field format edit options:\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'),\n (u'%s\\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.\\\n match(self.format).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if 
self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:for-each select = \"./%s\">%s<xsl:choose>'\\\n '<xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\">'\\\n '<xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise>'\\\n '<a href=\"%s{.}\"><xsl:value-of select=\".\"/></a>'\\\n '</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \\\n (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass 
PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n",
"step-ids": [
100,
168,
171,
173,
175
]
}
|
[
100,
168,
171,
173,
175
] |
#!/bin/python3


def _get_change_making_matrix(set_of_coins, r):
    """Build a (len(set_of_coins) + 1) x (r + 1) DP table.

    Column 0 is the base case: there is exactly one way to make
    change for 0 units (use no coins at all).
    """
    matrix = [[0 for _ in range(r + 1)] for _ in range(len(set_of_coins) + 1)]
    for i in range(1, len(set_of_coins) + 1):
        matrix[i][0] = 1
    return matrix


def change_making(coins, target):
    """Return the number of ways to make change for `target` units.

    All coins are assumed to be available in unlimited supply.
    coins is a list or tuple with the available denominations.
    """
    matrix = _get_change_making_matrix(coins, target)
    for coin in range(1, len(coins) + 1):
        for sub_target in range(1, target + 1):
            if coins[coin - 1] == sub_target:
                # The coin matches the amount exactly: every solution
                # that excludes it still works, plus the one-coin way.
                matrix[coin][sub_target] = 1 + matrix[coin - 1][sub_target]
            elif coins[coin - 1] > sub_target:
                # coins[coin - 1] is too large to be included, so reuse
                # the count that excludes it.
                matrix[coin][sub_target] = matrix[coin - 1][sub_target]
            else:
                # Sum the two disjoint cases:
                # 1. ways that never use coins[coin - 1] (row above), and
                # 2. ways that use it at least once (same row, shifted
                #    left by the coin's value).
                matrix[coin][sub_target] = (matrix[coin - 1][sub_target]
                    + matrix[coin][sub_target - coins[coin - 1]])
    return matrix[-1][-1]
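

# A quick sanity check (hypothetical, not part of the original
# submission; the values mirror the commented-out test inputs used
# below): 10 units can be made from coins {2, 5, 3, 6} in exactly
# five ways: 2+2+2+2+2, 2+2+3+3, 2+2+6, 2+3+5 and 5+5.
assert change_making([2, 5, 3, 6], 10) == 5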

# Read the target amount n and the denomination count m from stdin,
# then the m denominations themselves.
input1 = input()
input2 = input()
# Sample input for local testing:
# input1 = "10 4"
# input2 = "2 5 3 6"
n, m = input1.strip().split(' ')
n, m = [int(n), int(m)]  # m (the coin count) is not needed beyond parsing
c = list(map(int, input2.strip().split(' ')))
# Print the number of ways of making change for 'n' units using coins
# having the values given by 'c'.
ways = change_making(c, n)
print(ways)
from django.db import models

ch = [
    ('Garment', 'Garment'),
    ('Hardgoods', 'Hardgoods'),
    ('Home Furnishing', 'Home Furnishing'),
]


class Factory(models.Model):
    name = models.CharField(max_length=30, choices=ch)

    def __str__(self):
        return self.name


class Fabric(models.Model):
    name = models.ForeignKey(Factory, on_delete=models.CASCADE, null=True, blank=True)
    fabric = models.CharField(unique=True, max_length=100, null=True, blank=True)

    def __str__(self):
        return self.fabric


class Wash(models.Model):
    name = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True, blank=True)
    wash = models.CharField(unique=True, max_length=100, null=True, blank=True)

    def __str__(self):
        return self.wash


class Category(models.Model):
    cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)
    name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True, blank=True)
    category = models.CharField(unique=True, max_length=100, null=True, blank=True)

    def __str__(self):
        return self.category


class Subcategory(models.Model):
    name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True, blank=True)
    subcategory = models.CharField(unique=True, max_length=100, null=True, blank=True)

    def __str__(self):
        return self.subcategory


class Department(models.Model):
    name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=True, blank=True)
    department = models.CharField(unique=True, max_length=100, null=True, blank=True)

    def __str__(self):
        return self.department


class Sections(models.Model):
    name = models.ForeignKey(Department, on_delete=models.CASCADE, null=True, blank=True)
    section = models.CharField(unique=True, max_length=100, null=True, blank=True)

    def __str__(self):
        return self.section


class Subsection(models.Model):
    name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True, blank=True)
    subsection = models.CharField(unique=True, max_length=500, null=True, blank=True)

    def __str__(self):
        return self.subsection


class Person(models.Model):
    name = models.CharField(max_length=30)
    fact = models.ForeignKey(Factory, on_delete=models.CASCADE)
    fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)
    was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)
    cate = models.ForeignKey(Category, on_delete=models.CASCADE)
    subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)
    dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)
    sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)
    subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return str(self.name)
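

# A minimal usage sketch (hypothetical; it assumes these models belong
# to an installed Django app with migrations applied). Each level's
# ForeignKey is named `name`, so walking back up the chain repeats that
# attribute:
#
#   factory = Factory.objects.create(name='Garment')
#   denim = Fabric.objects.create(name=factory, fabric='Denim')
#   stone = Wash.objects.create(name=denim, wash='Stone Wash')
#   jeans = Category.objects.create(cat=factory, name=stone,
#                                   category='Jeans')
#   jeans.name.name.name.name  # -> 'Garment'
#   # (Category -> Wash -> Fabric -> Factory -> CharField)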
|
flexible
|
{
"blob_id": "a0dcfb738451c11ed4ff1428629c3f7bbf5c52c9",
"index": 5649,
"step-1": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-2": "<mask token>\n\n\nclass Wash(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.wash\n\n\nclass Category(models.Model):\n cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)\n name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True,\n blank=True)\n category = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-3": "<mask token>\n\n\nclass Fabric(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Wash(models.Model):\n name = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True,\n blank=True)\n wash = models.CharField(unique=True, max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.wash\n\n\nclass Category(models.Model):\n cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)\n name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True,\n blank=True)\n category = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-4": "<mask token>\n\n\nclass Fabric(models.Model):\n name = models.ForeignKey(Factory, on_delete=models.CASCADE, null=True,\n blank=True)\n fabric = models.CharField(unique=True, max_length=100, null=True, blank\n =True)\n\n def __str__(self):\n return self.fabric\n\n\nclass Wash(models.Model):\n name = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True,\n blank=True)\n wash = models.CharField(unique=True, max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.wash\n\n\nclass Category(models.Model):\n cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)\n name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True,\n blank=True)\n category = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-5": "from django.db import models\r\n\r\n\r\nch=[\r\n ('Garment','Garment'),\r\n ('Hardgoods','Hardgoods'),\r\n ('Home Furnishing','Home Furnishing'),\r\n]\r\nclass Factory(models.Model):\r\n name = models.CharField(max_length=30,choices=ch)\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\nclass Fabric(models.Model):\r\n name = models.ForeignKey(Factory, on_delete=models.CASCADE,null=True,blank=True)\r\n fabric = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.fabric\r\n\r\nclass Wash(models.Model):\r\n name=models.ForeignKey(Fabric,on_delete=models.CASCADE,null=True,blank=True)\r\n wash = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n\r\n def __str__(self):\r\n return self.wash\r\n\r\nclass Category(models.Model):\r\n cat=models.ForeignKey(Factory,on_delete=models.CASCADE,blank=True)\r\n name = models.ForeignKey(Wash, on_delete=models.CASCADE,null=True,blank=True)\r\n category = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.category\r\n\r\nclass Subcategory(models.Model):\r\n name = models.ForeignKey(Category, on_delete=models.CASCADE,null=True,blank=True)\r\n subcategory = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.subcategory\r\n\r\nclass Department(models.Model):\r\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE,null=True,blank=True)\r\n department = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.department\r\n\r\nclass Sections(models.Model):\r\n name = models.ForeignKey(Department, on_delete=models.CASCADE,null=True,blank=True)\r\n section = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.section\r\n\r\nclass Subsection(models.Model):\r\n name = models.ForeignKey(Sections, on_delete=models.CASCADE,null=True,blank=True)\r\n subsection = models.CharField(unique=True,max_length=500,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.subsection\r\n\r\nclass Person(models.Model):\r\n name=models.CharField(max_length=30)\r\n fact=models.ForeignKey(Factory,on_delete=models.CASCADE)\r\n fab=models.ForeignKey(Fabric,on_delete=models.CASCADE,null=True)\r\n was= models.ForeignKey(Wash, on_delete=models.CASCADE,null=True)\r\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\r\n subcat=models.ForeignKey(Subcategory,on_delete=models.CASCADE)\r\n dept = models.ForeignKey(Department, on_delete=models.CASCADE,null=True)\r\n sect=models.ForeignKey(Sections,on_delete=models.CASCADE,null=True)\r\n subsect=models.ForeignKey(Subsection,on_delete=models.CASCADE,null=True)\r\n\r\n def __str__(self):\r\n return str(self.name)\r\n",
"step-ids": [
17,
20,
22,
24,
30
]
}
|
[
17,
20,
22,
24,
30
] |
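A hedged usage sketch for the cascading dropdown models in the record above (Factory → Fabric → Wash → … → Person): each level filters on its parent's primary key. The `myapp.models` import path is an assumption for illustration only.

from myapp.models import Fabric, Wash  # assumed app path, illustration only

def fabrics_for_factory(factory_id):
    # Fabric.name is the ForeignKey back to Factory, so filter on name_id
    return Fabric.objects.filter(name_id=factory_id).values_list('id', 'fabric')

def washes_for_fabric(fabric_id):
    return Wash.objects.filter(name_id=fabric_id).values_list('id', 'wash')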
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Base:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Base:
def getTime(self):
"""
        Get the current timestamp.
:return:
"""
return str(time.time()).split('.')[0]
<|reserved_special_token_1|>
import time
class Base:
def getTime(self):
"""
        Get the current timestamp.
:return:
"""
return str(time.time()).split('.')[0]
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
import time
class Base:
def getTime(self):
'''
        Get the current timestamp.
:return:
'''
return str(time.time()).split('.')[0]
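For reference, a sketch of the same timestamp helper without string surgery; for the non-negative values time.time() returns, int() truncation matches split('.')[0]:

import time

class Base:
    def getTime(self):
        # same result as str(time.time()).split('.')[0]
        return str(int(time.time()))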
|
flexible
|
{
"blob_id": "28a920072bad1b411d71f7f70cd991cb7dfbeb8c",
"index": 8754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Base:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Base:\n\n def getTime(self):\n \"\"\"\n 获取时间戳\n :return: \n \"\"\"\n return str(time.time()).split('.')[0]\n",
"step-4": "import time\n\n\nclass Base:\n\n def getTime(self):\n \"\"\"\n 获取时间戳\n :return: \n \"\"\"\n return str(time.time()).split('.')[0]\n",
"step-5": "# -*- coding:utf-8 -*-\nimport time\nclass Base:\n def getTime(self):\n '''\n 获取时间戳\n :return: \n '''\n return str(time.time()).split('.')[0]",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import List
def uppercase_first_letter(string: str) ->str:
return string[0:1].upper() + string[1:]
string_list: List[str] = input('Please, input string: ').split(' ')
result: str = ''
for i, value in enumerate(string_list):
    # no separator before the first word, a single space before the rest
    result += ('' if i == 0 else ' ') + uppercase_first_letter(value)
print(result)
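A hedged alternative sketch with the same behavior as the loop above; note that str.title() would not be equivalent, since it also lowercases the rest of each word:

def capitalize_words(text: str) -> str:
    # uppercase only the first character of each space-separated word
    return ' '.join(w[:1].upper() + w[1:] for w in text.split(' '))

assert capitalize_words('hello worlD') == 'Hello WorlD'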
|
normal
|
{
"blob_id": "0555c577a8fb746cf2debb929d02b46cd3be4d7b",
"index": 1062,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef uppercase_first_letter(string: str) ->str:\n return string[0:1].upper() + string[1:]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef uppercase_first_letter(string: str) ->str:\n return string[0:1].upper() + string[1:]\n\n\nstring_list: List[str] = input('Please, input string: ').split(' ')\nresult: str = ''\nfor i, value in enumerate(string_list):\n result += (lambda index: '' if index == 0 else ' ')(i\n ) + uppercase_first_letter(value)\nprint(result)\n",
"step-4": "from typing import List\n\n\ndef uppercase_first_letter(string: str) ->str:\n return string[0:1].upper() + string[1:]\n\n\nstring_list: List[str] = input('Please, input string: ').split(' ')\nresult: str = ''\nfor i, value in enumerate(string_list):\n result += (lambda index: '' if index == 0 else ' ')(i\n ) + uppercase_first_letter(value)\nprint(result)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from interpreter import Interpreter
from pretty_print import PrettyPrint
from ast import Operator, Operation, Element
class Token:
operator = False
empty = False
def __init__(self, token):
self.token = token
if token == '+':
self.operator = True
elif token == '-':
self.operator = True
elif token == '*':
self.operator = True
elif token == '/':
self.operator = True
elif token == '':
self.empty = True
else:
self.token = int(self.token)
class Lexer:
def __init__(self, stream):
self.stream = stream
def get_token(self):
next_space = self.stream.find(' ')
if next_space == -1:
token = Token(self.stream)
self.stream = ''
return token
token = self.stream[:next_space]
self.stream = self.stream[next_space + 1:]
return Token(token)
class Parser:
def __init__(self, token_stream):
self.token_stream = token_stream
def parse(self, ast = None):
if ast is None:
first_token = self.token_stream.get_token()
ast = Element(first_token.token)
operator = self.token_stream.get_token()
if operator.empty:
return ast
if operator.operator:
return self.parse_operator(ast, operator)
def parse_operator(self, left_ast, operator):
right_token = self.token_stream.get_token()
return self.parse(
Operation(
left_ast,
Element(right_token.token),
Operator(operator.token)
)
)
def test_ast():
calcul_visitor = Interpreter()
op = Operation(Element(7), Element(3), Operator('+'))
Operation(op, op, Operator('+')).accept(calcul_visitor)
calcul_visitor.print_result()
def test_lexer():
string = '1 + 3 + 4 + 50 + 1 + 0'
lexer = Lexer(string)
token = lexer.get_token()
while (not token.empty):
print(token.token)
token = lexer.get_token()
def test_parser():
parser = Parser(Lexer('1 + 2 + 3'))
ast = parser.parse()
ast.accept(PrettyPrint())
print()
while True:
try:
_in = input('string to calculate:')
except EOFError:
print()
break
ast = Parser(Lexer(_in)).parse()
ast.accept(PrettyPrint())
calc = Interpreter()
ast.accept(calc)
print(' = ', end='')
calc.print_result()
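One property of the record above worth spelling out: parse() folds tokens strictly left to right with no operator precedence, so mixed expressions group leftward. Assuming the imported Interpreter evaluates the tree as built:

ast = Parser(Lexer('1 + 2 * 3')).parse()   # parsed as (1 + 2) * 3
calc = Interpreter()
ast.accept(calc)
calc.print_result()                        # 9, not the precedence-aware 7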
|
normal
|
{
"blob_id": "0d6c1e74a274b3e8ad9c63ecaa125f79976db9b4",
"index": 1734,
"step-1": "<mask token>\n\n\nclass Token:\n operator = False\n empty = False\n\n def __init__(self, token):\n self.token = token\n if token == '+':\n self.operator = True\n elif token == '-':\n self.operator = True\n elif token == '*':\n self.operator = True\n elif token == '/':\n self.operator = True\n elif token == '':\n self.empty = True\n else:\n self.token = int(self.token)\n\n\nclass Lexer:\n\n def __init__(self, stream):\n self.stream = stream\n\n def get_token(self):\n next_space = self.stream.find(' ')\n if next_space == -1:\n token = Token(self.stream)\n self.stream = ''\n return token\n token = self.stream[:next_space]\n self.stream = self.stream[next_space + 1:]\n return Token(token)\n\n\nclass Parser:\n\n def __init__(self, token_stream):\n self.token_stream = token_stream\n\n def parse(self, ast=None):\n if ast is None:\n first_token = self.token_stream.get_token()\n ast = Element(first_token.token)\n operator = self.token_stream.get_token()\n if operator.empty:\n return ast\n if operator.operator:\n return self.parse_operator(ast, operator)\n\n def parse_operator(self, left_ast, operator):\n right_token = self.token_stream.get_token()\n return self.parse(Operation(left_ast, Element(right_token.token),\n Operator(operator.token)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Token:\n operator = False\n empty = False\n\n def __init__(self, token):\n self.token = token\n if token == '+':\n self.operator = True\n elif token == '-':\n self.operator = True\n elif token == '*':\n self.operator = True\n elif token == '/':\n self.operator = True\n elif token == '':\n self.empty = True\n else:\n self.token = int(self.token)\n\n\nclass Lexer:\n\n def __init__(self, stream):\n self.stream = stream\n\n def get_token(self):\n next_space = self.stream.find(' ')\n if next_space == -1:\n token = Token(self.stream)\n self.stream = ''\n return token\n token = self.stream[:next_space]\n self.stream = self.stream[next_space + 1:]\n return Token(token)\n\n\nclass Parser:\n\n def __init__(self, token_stream):\n self.token_stream = token_stream\n\n def parse(self, ast=None):\n if ast is None:\n first_token = self.token_stream.get_token()\n ast = Element(first_token.token)\n operator = self.token_stream.get_token()\n if operator.empty:\n return ast\n if operator.operator:\n return self.parse_operator(ast, operator)\n\n def parse_operator(self, left_ast, operator):\n right_token = self.token_stream.get_token()\n return self.parse(Operation(left_ast, Element(right_token.token),\n Operator(operator.token)))\n\n\ndef test_ast():\n calcul_visitor = Interpreter()\n op = Operation(Element(7), Element(3), Operator('+'))\n Operation(op, op, Operator('+')).accept(calcul_visitor)\n calcul_visitor.print_result()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Token:\n operator = False\n empty = False\n\n def __init__(self, token):\n self.token = token\n if token == '+':\n self.operator = True\n elif token == '-':\n self.operator = True\n elif token == '*':\n self.operator = True\n elif token == '/':\n self.operator = True\n elif token == '':\n self.empty = True\n else:\n self.token = int(self.token)\n\n\nclass Lexer:\n\n def __init__(self, stream):\n self.stream = stream\n\n def get_token(self):\n next_space = self.stream.find(' ')\n if next_space == -1:\n token = Token(self.stream)\n self.stream = ''\n return token\n token = self.stream[:next_space]\n self.stream = self.stream[next_space + 1:]\n return Token(token)\n\n\nclass Parser:\n\n def __init__(self, token_stream):\n self.token_stream = token_stream\n\n def parse(self, ast=None):\n if ast is None:\n first_token = self.token_stream.get_token()\n ast = Element(first_token.token)\n operator = self.token_stream.get_token()\n if operator.empty:\n return ast\n if operator.operator:\n return self.parse_operator(ast, operator)\n\n def parse_operator(self, left_ast, operator):\n right_token = self.token_stream.get_token()\n return self.parse(Operation(left_ast, Element(right_token.token),\n Operator(operator.token)))\n\n\ndef test_ast():\n calcul_visitor = Interpreter()\n op = Operation(Element(7), Element(3), Operator('+'))\n Operation(op, op, Operator('+')).accept(calcul_visitor)\n calcul_visitor.print_result()\n\n\ndef test_lexer():\n string = '1 + 3 + 4 + 50 + 1 + 0'\n lexer = Lexer(string)\n token = lexer.get_token()\n while not token.empty:\n print(token.token)\n token = lexer.get_token()\n\n\ndef test_parser():\n parser = Parser(Lexer('1 + 2 + 3'))\n ast = parser.parse()\n ast.accept(PrettyPrint())\n print()\n\n\n<mask token>\n",
"step-4": "from interpreter import Interpreter\nfrom pretty_print import PrettyPrint\nfrom ast import Operator, Operation, Element\n\n\nclass Token:\n operator = False\n empty = False\n\n def __init__(self, token):\n self.token = token\n if token == '+':\n self.operator = True\n elif token == '-':\n self.operator = True\n elif token == '*':\n self.operator = True\n elif token == '/':\n self.operator = True\n elif token == '':\n self.empty = True\n else:\n self.token = int(self.token)\n\n\nclass Lexer:\n\n def __init__(self, stream):\n self.stream = stream\n\n def get_token(self):\n next_space = self.stream.find(' ')\n if next_space == -1:\n token = Token(self.stream)\n self.stream = ''\n return token\n token = self.stream[:next_space]\n self.stream = self.stream[next_space + 1:]\n return Token(token)\n\n\nclass Parser:\n\n def __init__(self, token_stream):\n self.token_stream = token_stream\n\n def parse(self, ast=None):\n if ast is None:\n first_token = self.token_stream.get_token()\n ast = Element(first_token.token)\n operator = self.token_stream.get_token()\n if operator.empty:\n return ast\n if operator.operator:\n return self.parse_operator(ast, operator)\n\n def parse_operator(self, left_ast, operator):\n right_token = self.token_stream.get_token()\n return self.parse(Operation(left_ast, Element(right_token.token),\n Operator(operator.token)))\n\n\ndef test_ast():\n calcul_visitor = Interpreter()\n op = Operation(Element(7), Element(3), Operator('+'))\n Operation(op, op, Operator('+')).accept(calcul_visitor)\n calcul_visitor.print_result()\n\n\ndef test_lexer():\n string = '1 + 3 + 4 + 50 + 1 + 0'\n lexer = Lexer(string)\n token = lexer.get_token()\n while not token.empty:\n print(token.token)\n token = lexer.get_token()\n\n\ndef test_parser():\n parser = Parser(Lexer('1 + 2 + 3'))\n ast = parser.parse()\n ast.accept(PrettyPrint())\n print()\n\n\nwhile True:\n try:\n _in = input('string to calculate:')\n except EOFError:\n print()\n break\n ast = Parser(Lexer(_in)).parse()\n ast.accept(PrettyPrint())\n calc = Interpreter()\n ast.accept(calc)\n print(' = ', end='')\n calc.print_result()\n",
"step-5": "from interpreter import Interpreter\nfrom pretty_print import PrettyPrint\nfrom ast import Operator, Operation, Element\n\nclass Token:\n operator = False\n empty = False\n def __init__(self, token):\n self.token = token\n if token == '+':\n self.operator = True\n elif token == '-':\n self.operator = True\n elif token == '*':\n self.operator = True\n elif token == '/':\n self.operator = True\n elif token == '':\n self.empty = True\n else:\n self.token = int(self.token)\n\n\nclass Lexer:\n def __init__(self, stream):\n self.stream = stream\n\n def get_token(self):\n next_space = self.stream.find(' ')\n if next_space == -1:\n token = Token(self.stream)\n self.stream = ''\n return token\n token = self.stream[:next_space]\n self.stream = self.stream[next_space + 1:]\n return Token(token)\n\n\nclass Parser:\n def __init__(self, token_stream):\n self.token_stream = token_stream\n\n def parse(self, ast = None):\n if ast is None:\n first_token = self.token_stream.get_token()\n ast = Element(first_token.token)\n operator = self.token_stream.get_token()\n if operator.empty:\n return ast\n if operator.operator:\n return self.parse_operator(ast, operator)\n\n def parse_operator(self, left_ast, operator):\n right_token = self.token_stream.get_token()\n return self.parse(\n Operation(\n left_ast,\n Element(right_token.token),\n Operator(operator.token)\n )\n )\n\ndef test_ast():\n calcul_visitor = Interpreter()\n op = Operation(Element(7), Element(3), Operator('+'))\n Operation(op, op, Operator('+')).accept(calcul_visitor)\n calcul_visitor.print_result()\n\ndef test_lexer():\n string = '1 + 3 + 4 + 50 + 1 + 0'\n lexer = Lexer(string)\n token = lexer.get_token()\n while (not token.empty):\n print(token.token)\n token = lexer.get_token()\n\ndef test_parser():\n parser = Parser(Lexer('1 + 2 + 3'))\n ast = parser.parse()\n ast.accept(PrettyPrint())\n print()\n\nwhile True:\n try:\n _in = input('string to calculate:')\n except EOFError:\n print()\n break\n ast = Parser(Lexer(_in)).parse()\n ast.accept(PrettyPrint())\n calc = Interpreter()\n ast.accept(calc)\n print(' = ', end='')\n calc.print_result()\n",
"step-ids": [
10,
11,
13,
15,
16
]
}
|
[
10,
11,
13,
15,
16
] |
from api.decidim_connector import DecidimConnector
from api.participatory_processes_reader import ParticipatoryProcessesReader
from api.version_reader import VersionReader
API_URL = "https://meta.decidim.org/api"
decidim_connector = DecidimConnector(API_URL)
version_reader = VersionReader(decidim_connector)
version = version_reader.process_query()
print(version)
participatory_processes_reader = ParticipatoryProcessesReader(decidim_connector)
participatory_processes = participatory_processes_reader.process_query()
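Hedged sketch only: Decidim's /api endpoint speaks GraphQL, so the reader classes above presumably post queries like the one below. The exact query text is an assumption for illustration, not the readers' real payload.

import requests

response = requests.post(
    'https://meta.decidim.org/api',
    json={'query': '{ decidim { version } }'},  # assumed query shape
    timeout=10,
)
print(response.json())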
|
normal
|
{
"blob_id": "88a469eba61fb6968db8cc5e1f93f12093b7f128",
"index": 6973,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(version)\n<mask token>\n",
"step-3": "<mask token>\nAPI_URL = 'https://meta.decidim.org/api'\ndecidim_connector = DecidimConnector(API_URL)\nversion_reader = VersionReader(decidim_connector)\nversion = version_reader.process_query()\nprint(version)\nparticipatory_processes_reader = ParticipatoryProcessesReader(decidim_connector\n )\nparticipatory_processes = participatory_processes_reader.process_query()\n",
"step-4": "from api.decidim_connector import DecidimConnector\nfrom api.participatory_processes_reader import ParticipatoryProcessesReader\nfrom api.version_reader import VersionReader\nAPI_URL = 'https://meta.decidim.org/api'\ndecidim_connector = DecidimConnector(API_URL)\nversion_reader = VersionReader(decidim_connector)\nversion = version_reader.process_query()\nprint(version)\nparticipatory_processes_reader = ParticipatoryProcessesReader(decidim_connector\n )\nparticipatory_processes = participatory_processes_reader.process_query()\n",
"step-5": "from api.decidim_connector import DecidimConnector\nfrom api.participatory_processes_reader import ParticipatoryProcessesReader\nfrom api.version_reader import VersionReader\n\nAPI_URL = \"https://meta.decidim.org/api\"\ndecidim_connector = DecidimConnector(API_URL)\nversion_reader = VersionReader(decidim_connector)\nversion = version_reader.process_query()\nprint(version)\n\nparticipatory_processes_reader = ParticipatoryProcessesReader(decidim_connector)\nparticipatory_processes = participatory_processes_reader.process_query()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 16:11:46 2021
@author: Suman
"""
import numpy as np
import cv2
rect = (0,0,0,0)
startPoint = False
endPoint = False
def mark_object(event,x,y,flags,params):
global rect,startPoint,endPoint
# get mouse click
if event == cv2.EVENT_LBUTTONDOWN:
if startPoint == True and endPoint == True:
startPoint = False
endPoint = False
rect = (0, 0, 0, 0)
if startPoint == False:
rect = (x, y, 0, 0)
startPoint = True
elif endPoint == False:
rect = (rect[0], rect[1], x, y)
endPoint = True
cap = cv2.VideoCapture('movingball.mp4')
#Reading the first frame
(grabbed, frame) = cap.read()
while cap.isOpened():

    (grabbed, frame) = cap.read()
    if not grabbed:  # cap.read() yields (False, None) once the video ends
        break

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', mark_object)
#drawing rectangle
if startPoint == True and endPoint == True:
cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0, 255), 2)
cv2.imshow('frame',frame)
    if cv2.waitKey(100) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
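A hedged follow-up sketch: once both corners are marked, the selected region can be cropped. Coordinates are sorted so the crop works regardless of drag direction; `frame` is assumed to hold the last frame read above.

def crop_marked_region(frame, rect):
    x1, y1, x2, y2 = rect
    top, bottom = sorted((y1, y2))
    left, right = sorted((x1, x2))
    return frame[top:bottom, left:right]  # numpy slicing: rows, then columns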
|
normal
|
{
"blob_id": "0f3e19b02dbe508bc4e0ef7879af81a9eabfd8c9",
"index": 6141,
"step-1": "<mask token>\n\n\ndef mark_object(event, x, y, flags, params):\n global rect, startPoint, endPoint\n if event == cv2.EVENT_LBUTTONDOWN:\n if startPoint == True and endPoint == True:\n startPoint = False\n endPoint = False\n rect = 0, 0, 0, 0\n if startPoint == False:\n rect = x, y, 0, 0\n startPoint = True\n elif endPoint == False:\n rect = rect[0], rect[1], x, y\n endPoint = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mark_object(event, x, y, flags, params):\n global rect, startPoint, endPoint\n if event == cv2.EVENT_LBUTTONDOWN:\n if startPoint == True and endPoint == True:\n startPoint = False\n endPoint = False\n rect = 0, 0, 0, 0\n if startPoint == False:\n rect = x, y, 0, 0\n startPoint = True\n elif endPoint == False:\n rect = rect[0], rect[1], x, y\n endPoint = True\n\n\n<mask token>\nwhile cap.isOpened():\n grabbed, frame = cap.read()\n cv2.namedWindow('frame')\n cv2.setMouseCallback('frame', mark_object)\n if startPoint == True and endPoint == True:\n cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0,\n 255), 2)\n cv2.imshow('frame', frame)\n if cv2.waitKey(100) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nrect = 0, 0, 0, 0\nstartPoint = False\nendPoint = False\n\n\ndef mark_object(event, x, y, flags, params):\n global rect, startPoint, endPoint\n if event == cv2.EVENT_LBUTTONDOWN:\n if startPoint == True and endPoint == True:\n startPoint = False\n endPoint = False\n rect = 0, 0, 0, 0\n if startPoint == False:\n rect = x, y, 0, 0\n startPoint = True\n elif endPoint == False:\n rect = rect[0], rect[1], x, y\n endPoint = True\n\n\ncap = cv2.VideoCapture('movingball.mp4')\ngrabbed, frame = cap.read()\nwhile cap.isOpened():\n grabbed, frame = cap.read()\n cv2.namedWindow('frame')\n cv2.setMouseCallback('frame', mark_object)\n if startPoint == True and endPoint == True:\n cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0,\n 255), 2)\n cv2.imshow('frame', frame)\n if cv2.waitKey(100) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-4": "<mask token>\nimport numpy as np\nimport cv2\nrect = 0, 0, 0, 0\nstartPoint = False\nendPoint = False\n\n\ndef mark_object(event, x, y, flags, params):\n global rect, startPoint, endPoint\n if event == cv2.EVENT_LBUTTONDOWN:\n if startPoint == True and endPoint == True:\n startPoint = False\n endPoint = False\n rect = 0, 0, 0, 0\n if startPoint == False:\n rect = x, y, 0, 0\n startPoint = True\n elif endPoint == False:\n rect = rect[0], rect[1], x, y\n endPoint = True\n\n\ncap = cv2.VideoCapture('movingball.mp4')\ngrabbed, frame = cap.read()\nwhile cap.isOpened():\n grabbed, frame = cap.read()\n cv2.namedWindow('frame')\n cv2.setMouseCallback('frame', mark_object)\n if startPoint == True and endPoint == True:\n cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0,\n 255), 2)\n cv2.imshow('frame', frame)\n if cv2.waitKey(100) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 16 16:11:46 2021\r\n\r\n@author: Suman\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nrect = (0,0,0,0)\r\nstartPoint = False\r\nendPoint = False\r\n\r\ndef mark_object(event,x,y,flags,params):\r\n\r\n global rect,startPoint,endPoint\r\n\r\n # get mouse click\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n\r\n if startPoint == True and endPoint == True:\r\n startPoint = False\r\n endPoint = False\r\n rect = (0, 0, 0, 0)\r\n\r\n if startPoint == False:\r\n rect = (x, y, 0, 0)\r\n startPoint = True\r\n elif endPoint == False:\r\n rect = (rect[0], rect[1], x, y)\r\n endPoint = True\r\n\r\ncap = cv2.VideoCapture('movingball.mp4')\r\n\r\n\r\n#Reading the first frame\r\n(grabbed, frame) = cap.read()\r\n\r\nwhile(cap.isOpened()):\r\n\r\n (grabbed, frame) = cap.read()\r\n\r\n cv2.namedWindow('frame')\r\n cv2.setMouseCallback('frame', mark_object) \r\n\r\n #drawing rectangle\r\n if startPoint == True and endPoint == True:\r\n cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0, 255), 2)\r\n\r\n cv2.imshow('frame',frame)\r\n\r\n\r\n\r\n if cv2.waitKey(100)& 0xFF==ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from interface_app.models import TestTask, TestCase
from interface_app.extend.task_run import run_cases
import os
import json
from interface_app.apps import TASK_PATH, RUN_TASK_FILE
"""
Note: interface task views; each returns an HTML page.
"""
# Get the task list
def task_manage(request):
testtasks = TestTask.objects.all()
if request.method == "GET":
return render(request, "task_manage.html", {
"type": "list",
"testtasks": testtasks,
})
else:
return HttpResponse("404")
# Create a task
def add_task(request):
if request.method == "GET":
return render(request, "add_task.html", {
"type": "add",
})
else:
return HttpResponse("404")
# Run a task
def run_task(request, tid):
if request.method == "GET":
task_obj = TestTask.objects.get(id=tid)
        cases_list = task_obj.cases.split(",")
        cases_list.pop(-1)  # drop the empty entry left by the trailing comma
        task_obj.status = 1  # update the status
task_obj.save()
print(cases_list)
        # run_cases()  # run function (unused; see the os.system call below)
all_cases_dict = {}
for case_id in cases_list:
case_obj = TestCase.objects.get(id=case_id)
case_dict = {
"url": case_obj.url,
"method": case_obj.req_method,
"type_": case_obj.req_type,
"header": case_obj.req_header,
"parameter": case_obj.req_parameter,
"assert_": case_obj.resp_assert
}
all_cases_dict[case_obj.id] = case_dict
print(all_cases_dict)
cases_str = json.dumps(all_cases_dict)
cases_data_file = TASK_PATH + "cases_data.json"
print(cases_data_file)
with open(cases_data_file, "w+") as f:
f.write(cases_str)
        # run the tests
os.system("python3 " + RUN_TASK_FILE)
return HttpResponseRedirect("/interface/task_manage")
else:
return HttpResponse("404")
# How should these cases be run? -- a unit test framework + data-driven testing
# unittest + ddt
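A hedged sketch of the "unittest + ddt" runner the closing comment points at; RUN_TASK_FILE could contain something along these lines. The request dispatch and assertion are simplified placeholders -- the real runner would also branch on req_type and headers.

import json
import unittest

import requests
from ddt import ddt, data

with open('cases_data.json') as f:  # written under TASK_PATH by the view above
    CASES = list(json.load(f).values())

@ddt
class TestInterfaces(unittest.TestCase):

    @data(*CASES)
    def test_case(self, case):
        resp = requests.request(case['method'], case['url'])
        self.assertIn(case['assert_'], resp.text)

if __name__ == '__main__':
    unittest.main()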
|
normal
|
{
"blob_id": "8be70543a7aa177d9ad48fb736228b1ffba5df16",
"index": 6179,
"step-1": "<mask token>\n\n\ndef run_task(request, tid):\n if request.method == 'GET':\n task_obj = TestTask.objects.get(id=tid)\n cases_list = task_obj.cases.split(',')\n cases_list.pop(-1)\n task_obj.status = 1\n task_obj.save()\n print(cases_list)\n all_cases_dict = {}\n for case_id in cases_list:\n case_obj = TestCase.objects.get(id=case_id)\n case_dict = {'url': case_obj.url, 'method': case_obj.req_method,\n 'type_': case_obj.req_type, 'header': case_obj.req_header,\n 'parameter': case_obj.req_parameter, 'assert_': case_obj.\n resp_assert}\n all_cases_dict[case_obj.id] = case_dict\n print(all_cases_dict)\n cases_str = json.dumps(all_cases_dict)\n cases_data_file = TASK_PATH + 'cases_data.json'\n print(cases_data_file)\n with open(cases_data_file, 'w+') as f:\n f.write(cases_str)\n os.system('python3 ' + RUN_TASK_FILE)\n return HttpResponseRedirect('/interface/task_manage')\n else:\n return HttpResponse('404')\n",
"step-2": "<mask token>\n\n\ndef add_task(request):\n if request.method == 'GET':\n return render(request, 'add_task.html', {'type': 'add'})\n else:\n return HttpResponse('404')\n\n\ndef run_task(request, tid):\n if request.method == 'GET':\n task_obj = TestTask.objects.get(id=tid)\n cases_list = task_obj.cases.split(',')\n cases_list.pop(-1)\n task_obj.status = 1\n task_obj.save()\n print(cases_list)\n all_cases_dict = {}\n for case_id in cases_list:\n case_obj = TestCase.objects.get(id=case_id)\n case_dict = {'url': case_obj.url, 'method': case_obj.req_method,\n 'type_': case_obj.req_type, 'header': case_obj.req_header,\n 'parameter': case_obj.req_parameter, 'assert_': case_obj.\n resp_assert}\n all_cases_dict[case_obj.id] = case_dict\n print(all_cases_dict)\n cases_str = json.dumps(all_cases_dict)\n cases_data_file = TASK_PATH + 'cases_data.json'\n print(cases_data_file)\n with open(cases_data_file, 'w+') as f:\n f.write(cases_str)\n os.system('python3 ' + RUN_TASK_FILE)\n return HttpResponseRedirect('/interface/task_manage')\n else:\n return HttpResponse('404')\n",
"step-3": "<mask token>\n\n\ndef task_manage(request):\n testtasks = TestTask.objects.all()\n if request.method == 'GET':\n return render(request, 'task_manage.html', {'type': 'list',\n 'testtasks': testtasks})\n else:\n return HttpResponse('404')\n\n\ndef add_task(request):\n if request.method == 'GET':\n return render(request, 'add_task.html', {'type': 'add'})\n else:\n return HttpResponse('404')\n\n\ndef run_task(request, tid):\n if request.method == 'GET':\n task_obj = TestTask.objects.get(id=tid)\n cases_list = task_obj.cases.split(',')\n cases_list.pop(-1)\n task_obj.status = 1\n task_obj.save()\n print(cases_list)\n all_cases_dict = {}\n for case_id in cases_list:\n case_obj = TestCase.objects.get(id=case_id)\n case_dict = {'url': case_obj.url, 'method': case_obj.req_method,\n 'type_': case_obj.req_type, 'header': case_obj.req_header,\n 'parameter': case_obj.req_parameter, 'assert_': case_obj.\n resp_assert}\n all_cases_dict[case_obj.id] = case_dict\n print(all_cases_dict)\n cases_str = json.dumps(all_cases_dict)\n cases_data_file = TASK_PATH + 'cases_data.json'\n print(cases_data_file)\n with open(cases_data_file, 'w+') as f:\n f.write(cases_str)\n os.system('python3 ' + RUN_TASK_FILE)\n return HttpResponseRedirect('/interface/task_manage')\n else:\n return HttpResponse('404')\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom interface_app.models import TestTask, TestCase\nfrom interface_app.extend.task_run import run_cases\nimport os\nimport json\nfrom interface_app.apps import TASK_PATH, RUN_TASK_FILE\n<mask token>\n\n\ndef task_manage(request):\n testtasks = TestTask.objects.all()\n if request.method == 'GET':\n return render(request, 'task_manage.html', {'type': 'list',\n 'testtasks': testtasks})\n else:\n return HttpResponse('404')\n\n\ndef add_task(request):\n if request.method == 'GET':\n return render(request, 'add_task.html', {'type': 'add'})\n else:\n return HttpResponse('404')\n\n\ndef run_task(request, tid):\n if request.method == 'GET':\n task_obj = TestTask.objects.get(id=tid)\n cases_list = task_obj.cases.split(',')\n cases_list.pop(-1)\n task_obj.status = 1\n task_obj.save()\n print(cases_list)\n all_cases_dict = {}\n for case_id in cases_list:\n case_obj = TestCase.objects.get(id=case_id)\n case_dict = {'url': case_obj.url, 'method': case_obj.req_method,\n 'type_': case_obj.req_type, 'header': case_obj.req_header,\n 'parameter': case_obj.req_parameter, 'assert_': case_obj.\n resp_assert}\n all_cases_dict[case_obj.id] = case_dict\n print(all_cases_dict)\n cases_str = json.dumps(all_cases_dict)\n cases_data_file = TASK_PATH + 'cases_data.json'\n print(cases_data_file)\n with open(cases_data_file, 'w+') as f:\n f.write(cases_str)\n os.system('python3 ' + RUN_TASK_FILE)\n return HttpResponseRedirect('/interface/task_manage')\n else:\n return HttpResponse('404')\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom interface_app.models import TestTask, TestCase\nfrom interface_app.extend.task_run import run_cases\nimport os \nimport json\nfrom interface_app.apps import TASK_PATH, RUN_TASK_FILE\n\n\n\"\"\"\n说明:接口任务文件,返回HTML页面\n\"\"\"\n\n# 获取任务列表\ndef task_manage(request):\n testtasks = TestTask.objects.all()\n \n if request.method == \"GET\":\n return render(request, \"task_manage.html\", {\n \"type\": \"list\",\n \"testtasks\": testtasks,\n })\n else:\n return HttpResponse(\"404\")\n\n\n# 创建任务\ndef add_task(request):\n if request.method == \"GET\":\n return render(request, \"add_task.html\", {\n \"type\": \"add\",\n })\n else:\n return HttpResponse(\"404\")\n\n\n# 运行任务\ndef run_task(request, tid):\n if request.method == \"GET\":\n task_obj = TestTask.objects.get(id=tid)\n cases_list = task_obj.cases.split(\",\")\n cases_list.pop(-1)\n\n task_obj.status = 1 # 修改状态\n task_obj.save()\n\n \n print(cases_list)\n # run_cases() #运行函数\n all_cases_dict = {}\n for case_id in cases_list:\n case_obj = TestCase.objects.get(id=case_id)\n case_dict = {\n \"url\": case_obj.url,\n \"method\": case_obj.req_method,\n \"type_\": case_obj.req_type,\n \"header\": case_obj.req_header,\n \"parameter\": case_obj.req_parameter,\n \"assert_\": case_obj.resp_assert\n } \n all_cases_dict[case_obj.id] = case_dict\n\n print(all_cases_dict)\n\n cases_str = json.dumps(all_cases_dict)\n\n cases_data_file = TASK_PATH + \"cases_data.json\"\n print(cases_data_file)\n\n with open(cases_data_file, \"w+\") as f:\n f.write(cases_str)\n\n # 运行测试\n os.system(\"python3 \" + RUN_TASK_FILE)\n \n return HttpResponseRedirect(\"/interface/task_manage\")\n else:\n return HttpResponse(\"404\")\n\n\n# 如何去运行这些用例?--单元测试框架 + 数据驱动\n\n# unittest + ddt\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import reddit
import tts
import sys
import praw
import os
#TODO: censor curse words; tag images that contain curse words; strip punctuation from comment-reply mp3s
#TODO: pay for ads :thinking: buy views?
#TODO: sort by top upvotes
#todo: remove the formatting stuff
#todo: redo ducking
#todo: quick script to get high-upvote replies
#todo: remove hyperlinks
POST_ID = sys.argv[1]
NUM_POSTS = int(sys.argv[2])
reddit_object = praw.Reddit(
client_id="aAhfCgWHCGOylw",
client_secret="FLrVvWquolZc4cnKaEhULqzfUYsxQQ",
user_agent='reddit_to_vid')
print(f"NOW PROCESSING POST ID: {POST_ID}")
comments_from_post,post_title = reddit.get_top_comments_from_id(reddit_object,POST_ID,NUM_POSTS)
tts.comment_to_mp3(post_title,'./quota.txt','titles',0,randomize=True)
n = 1
for comment in comments_from_post:
tts.comment_to_mp3(comment,"./quota.txt",POST_ID,n,randomize=True)
n+=1
tts.comment_to_mp3("Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!","./quota.txt",'duck',1,randomize=True)
|
normal
|
{
"blob_id": "fd57e13269ca00ed5eb05e00bd7999c041141187",
"index": 4256,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'NOW PROCESSING POST ID: {POST_ID}')\n<mask token>\ntts.comment_to_mp3(post_title, './quota.txt', 'titles', 0, randomize=True)\n<mask token>\nfor comment in comments_from_post:\n tts.comment_to_mp3(comment, './quota.txt', POST_ID, n, randomize=True)\n n += 1\ntts.comment_to_mp3(\n \"Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!\"\n , './quota.txt', 'duck', 1, randomize=True)\n",
"step-3": "<mask token>\nPOST_ID = sys.argv[1]\nNUM_POSTS = int(sys.argv[2])\nreddit_object = praw.Reddit(client_id='aAhfCgWHCGOylw', client_secret=\n 'FLrVvWquolZc4cnKaEhULqzfUYsxQQ', user_agent='reddit_to_vid')\nprint(f'NOW PROCESSING POST ID: {POST_ID}')\ncomments_from_post, post_title = reddit.get_top_comments_from_id(reddit_object,\n POST_ID, NUM_POSTS)\ntts.comment_to_mp3(post_title, './quota.txt', 'titles', 0, randomize=True)\nn = 1\nfor comment in comments_from_post:\n tts.comment_to_mp3(comment, './quota.txt', POST_ID, n, randomize=True)\n n += 1\ntts.comment_to_mp3(\n \"Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!\"\n , './quota.txt', 'duck', 1, randomize=True)\n",
"step-4": "import reddit\nimport tts\nimport sys\nimport praw\nimport os\nPOST_ID = sys.argv[1]\nNUM_POSTS = int(sys.argv[2])\nreddit_object = praw.Reddit(client_id='aAhfCgWHCGOylw', client_secret=\n 'FLrVvWquolZc4cnKaEhULqzfUYsxQQ', user_agent='reddit_to_vid')\nprint(f'NOW PROCESSING POST ID: {POST_ID}')\ncomments_from_post, post_title = reddit.get_top_comments_from_id(reddit_object,\n POST_ID, NUM_POSTS)\ntts.comment_to_mp3(post_title, './quota.txt', 'titles', 0, randomize=True)\nn = 1\nfor comment in comments_from_post:\n tts.comment_to_mp3(comment, './quota.txt', POST_ID, n, randomize=True)\n n += 1\ntts.comment_to_mp3(\n \"Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!\"\n , './quota.txt', 'duck', 1, randomize=True)\n",
"step-5": "import reddit\r\nimport tts\r\nimport sys\r\nimport praw\r\nimport os\r\n\r\n#TODO: CENSOR CURSE WORDS,tag images that have curse words in them. strip punctuation from comment replies mp3\r\n#TODO: pay for ads :thinking: buy views?\r\n#TODO: sort by top upvotes\r\n#todo: remove the formatting stuff\r\n#todo: redo ducking\r\n#todo quick script to get high upvote replies\r\n#todo: remove hyperlinks\r\n\r\nPOST_ID = sys.argv[1]\r\nNUM_POSTS = int(sys.argv[2])\r\n\r\nreddit_object = praw.Reddit(\r\n client_id=\"aAhfCgWHCGOylw\",\r\n client_secret=\"FLrVvWquolZc4cnKaEhULqzfUYsxQQ\",\r\n user_agent='reddit_to_vid')\r\n\r\n\r\nprint(f\"NOW PROCESSING POST ID: {POST_ID}\")\r\ncomments_from_post,post_title = reddit.get_top_comments_from_id(reddit_object,POST_ID,NUM_POSTS)\r\ntts.comment_to_mp3(post_title,'./quota.txt','titles',0,randomize=True)\r\nn = 1\r\nfor comment in comments_from_post:\r\n tts.comment_to_mp3(comment,\"./quota.txt\",POST_ID,n,randomize=True)\r\n n+=1\r\ntts.comment_to_mp3(\"Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!\",\"./quota.txt\",'duck',1,randomize=True)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def _load_img(fp):
img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
if img.ndim == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _load_img(fp):
img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
if img.ndim == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
if __name__ == '__main__':
parser = ArgumentParserRGBDSegmentation(description=
'Efficient RGBD Indoor Sematic Segmentation (Inference)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_root', type=str, default=HOME +
'/bags/june_25th/kinect_rgbd_person_act', help=
'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'
)
parser.set_common_args()
parser.add_argument('--ckpt_path', type=str, default=
'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=
'Path to the checkpoint of the trained model.')
parser.add_argument('--depth_scale', type=float, default=1.0, help=
'Additional depth scaling factor to apply.')
args = parser.parse_args()
args.pretrained_on_imagenet = False
dataset, preprocessor = prepare_data(args, with_input_orig=True)
n_classes = dataset.n_classes_without_void
model, device = build_model(args, n_classes=n_classes)
checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,
loc: storage)
model.load_state_dict(checkpoint['state_dict'])
print('Loaded checkpoint from {}'.format(args.ckpt_path))
model.eval()
model.to(device)
rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))
depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))
assert args.modality == 'rgbd', 'Only RGBD inference supported so far'
assert len(rgb_filepaths) == len(depth_filepaths)
filepaths = zip(rgb_filepaths, depth_filepaths)
for fp_rgb, fp_depth in filepaths:
img_rgb = _load_img(fp_rgb)
img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale
h, w, _ = img_rgb.shape
sample = preprocessor({'image': img_rgb, 'depth': img_depth})
image = sample['image'][None].to(device)
depth = sample['depth'][None].to(device)
pred = model(image, depth)
pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False
)
pred = torch.argmax(pred, dim=1)
pred = pred.cpu().numpy().squeeze().astype(np.uint8)
pred_colored = dataset.color_label(pred, with_void=False)
fig, axs = plt.subplots(1, 3, figsize=(16, 3))
[ax.set_axis_off() for ax in axs.ravel()]
axs[0].imshow(img_rgb)
axs[1].imshow(img_depth, cmap='gray')
axs[2].imshow(pred_colored)
plt.suptitle(
f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'
)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
HOME = os.environ['HOME']
def _load_img(fp):
img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
if img.ndim == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
if __name__ == '__main__':
parser = ArgumentParserRGBDSegmentation(description=
'Efficient RGBD Indoor Sematic Segmentation (Inference)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_root', type=str, default=HOME +
'/bags/june_25th/kinect_rgbd_person_act', help=
'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'
)
parser.set_common_args()
parser.add_argument('--ckpt_path', type=str, default=
'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=
'Path to the checkpoint of the trained model.')
parser.add_argument('--depth_scale', type=float, default=1.0, help=
'Additional depth scaling factor to apply.')
args = parser.parse_args()
args.pretrained_on_imagenet = False
dataset, preprocessor = prepare_data(args, with_input_orig=True)
n_classes = dataset.n_classes_without_void
model, device = build_model(args, n_classes=n_classes)
checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,
loc: storage)
model.load_state_dict(checkpoint['state_dict'])
print('Loaded checkpoint from {}'.format(args.ckpt_path))
model.eval()
model.to(device)
rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))
depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))
assert args.modality == 'rgbd', 'Only RGBD inference supported so far'
assert len(rgb_filepaths) == len(depth_filepaths)
filepaths = zip(rgb_filepaths, depth_filepaths)
for fp_rgb, fp_depth in filepaths:
img_rgb = _load_img(fp_rgb)
img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale
h, w, _ = img_rgb.shape
sample = preprocessor({'image': img_rgb, 'depth': img_depth})
image = sample['image'][None].to(device)
depth = sample['depth'][None].to(device)
pred = model(image, depth)
pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False
)
pred = torch.argmax(pred, dim=1)
pred = pred.cpu().numpy().squeeze().astype(np.uint8)
pred_colored = dataset.color_label(pred, with_void=False)
fig, axs = plt.subplots(1, 3, figsize=(16, 3))
[ax.set_axis_off() for ax in axs.ravel()]
axs[0].imshow(img_rgb)
axs[1].imshow(img_depth, cmap='gray')
axs[2].imshow(pred_colored)
plt.suptitle(
f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'
)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import argparse
from glob import glob
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from src.args import ArgumentParserRGBDSegmentation
from src.build_model import build_model
from src.prepare_data import prepare_data
HOME = os.environ['HOME']
def _load_img(fp):
img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
if img.ndim == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
if __name__ == '__main__':
parser = ArgumentParserRGBDSegmentation(description=
'Efficient RGBD Indoor Sematic Segmentation (Inference)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_root', type=str, default=HOME +
'/bags/june_25th/kinect_rgbd_person_act', help=
'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'
)
parser.set_common_args()
parser.add_argument('--ckpt_path', type=str, default=
'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=
'Path to the checkpoint of the trained model.')
parser.add_argument('--depth_scale', type=float, default=1.0, help=
'Additional depth scaling factor to apply.')
args = parser.parse_args()
args.pretrained_on_imagenet = False
dataset, preprocessor = prepare_data(args, with_input_orig=True)
n_classes = dataset.n_classes_without_void
model, device = build_model(args, n_classes=n_classes)
checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,
loc: storage)
model.load_state_dict(checkpoint['state_dict'])
print('Loaded checkpoint from {}'.format(args.ckpt_path))
model.eval()
model.to(device)
rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))
depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))
assert args.modality == 'rgbd', 'Only RGBD inference supported so far'
assert len(rgb_filepaths) == len(depth_filepaths)
filepaths = zip(rgb_filepaths, depth_filepaths)
for fp_rgb, fp_depth in filepaths:
img_rgb = _load_img(fp_rgb)
img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale
h, w, _ = img_rgb.shape
sample = preprocessor({'image': img_rgb, 'depth': img_depth})
image = sample['image'][None].to(device)
depth = sample['depth'][None].to(device)
pred = model(image, depth)
pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False
)
pred = torch.argmax(pred, dim=1)
pred = pred.cpu().numpy().squeeze().astype(np.uint8)
pred_colored = dataset.color_label(pred, with_void=False)
fig, axs = plt.subplots(1, 3, figsize=(16, 3))
[ax.set_axis_off() for ax in axs.ravel()]
axs[0].imshow(img_rgb)
axs[1].imshow(img_depth, cmap='gray')
axs[2].imshow(pred_colored)
plt.suptitle(
f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'
)
plt.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <[email protected]>
"""
import argparse
from glob import glob
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from src.args import ArgumentParserRGBDSegmentation
from src.build_model import build_model
from src.prepare_data import prepare_data
HOME = os.environ["HOME"]
def _load_img(fp):
img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
if img.ndim == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
if __name__ == "__main__":
# arguments
parser = ArgumentParserRGBDSegmentation(
description="Efficient RGBD Indoor Sematic Segmentation (Inference)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data_root",
type=str,
default=HOME + "/bags/june_25th/kinect_rgbd_person_act",
help="Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth",
)
parser.set_common_args()
parser.add_argument(
"--ckpt_path",
type=str,
default="trained_models/nyuv2/r34_NBt1D_scenenet.pth",
# default="trained_models/sunrgbd/r34_NBt1D.pth",
help="Path to the checkpoint of the trained model.",
)
parser.add_argument(
"--depth_scale",
type=float,
default=1.0,
help="Additional depth scaling factor to apply.",
)
args = parser.parse_args()
# dataset
# TODO: set dataset to be sunrgbd
# args.dataset = "sunrgbd"
args.pretrained_on_imagenet = False # we are loading other weights anyway
dataset, preprocessor = prepare_data(args, with_input_orig=True)
n_classes = dataset.n_classes_without_void
# model and checkpoint loading
model, device = build_model(args, n_classes=n_classes)
checkpoint = torch.load(args.ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
print("Loaded checkpoint from {}".format(args.ckpt_path))
model.eval()
model.to(device)
# get samples
rgb_filepaths = sorted(glob(os.path.join(args.data_root, "color/*.jpg")))
depth_filepaths = sorted(glob(os.path.join(args.data_root, "depth/*.png")))
assert args.modality == "rgbd", "Only RGBD inference supported so far"
assert len(rgb_filepaths) == len(depth_filepaths)
filepaths = zip(rgb_filepaths, depth_filepaths)
# inference
for fp_rgb, fp_depth in filepaths:
# load sample
img_rgb = _load_img(fp_rgb)
img_depth = _load_img(fp_depth).astype("float32") * args.depth_scale
h, w, _ = img_rgb.shape
# preprocess sample
sample = preprocessor({"image": img_rgb, "depth": img_depth})
# add batch axis and copy to device
image = sample["image"][None].to(device)
depth = sample["depth"][None].to(device)
# apply network
pred = model(image, depth)
pred = F.interpolate(pred, (h, w), mode="bilinear", align_corners=False)
pred = torch.argmax(pred, dim=1)
pred = pred.cpu().numpy().squeeze().astype(np.uint8)
# show result
pred_colored = dataset.color_label(pred, with_void=False)
fig, axs = plt.subplots(1, 3, figsize=(16, 3))
[ax.set_axis_off() for ax in axs.ravel()]
axs[0].imshow(img_rgb)
axs[1].imshow(img_depth, cmap="gray")
axs[2].imshow(pred_colored)
plt.suptitle(
f"Image: ({os.path.basename(fp_rgb)}, "
f"{os.path.basename(fp_depth)}), Model: {args.ckpt_path}"
)
# plt.savefig('./result.jpg', dpi=150)
plt.show()
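A hedged sketch of writing results to disk, in the spirit of the commented-out plt.savefig above; pred_colored is assumed to be the HxWx3 uint8 RGB array produced in the loop.

out_path = os.path.splitext(os.path.basename(fp_rgb))[0] + '_pred.png'
cv2.imwrite(out_path, cv2.cvtColor(pred_colored, cv2.COLOR_RGB2BGR))  # RGB -> BGR for OpenCV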
|
flexible
|
{
"blob_id": "559e46aa4e9b55f8c01acf30fa01e106ab914116",
"index": 5687,
"step-1": "<mask token>\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == '__main__':\n parser = ArgumentParserRGBDSegmentation(description=\n 'Efficient RGBD Indoor Sematic Segmentation (Inference)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data_root', type=str, default=HOME +\n '/bags/june_25th/kinect_rgbd_person_act', help=\n 'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'\n )\n parser.set_common_args()\n parser.add_argument('--ckpt_path', type=str, default=\n 'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=\n 'Path to the checkpoint of the trained model.')\n parser.add_argument('--depth_scale', type=float, default=1.0, help=\n 'Additional depth scaling factor to apply.')\n args = parser.parse_args()\n args.pretrained_on_imagenet = False\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,\n loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print('Loaded checkpoint from {}'.format(args.ckpt_path))\n model.eval()\n model.to(device)\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))\n assert args.modality == 'rgbd', 'Only RGBD inference supported so far'\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n for fp_rgb, fp_depth in filepaths:\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale\n h, w, _ = img_rgb.shape\n sample = preprocessor({'image': img_rgb, 'depth': img_depth})\n image = sample['image'][None].to(device)\n depth = sample['depth'][None].to(device)\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False\n )\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap='gray')\n axs[2].imshow(pred_colored)\n plt.suptitle(\n f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'\n )\n plt.show()\n",
"step-3": "<mask token>\nHOME = os.environ['HOME']\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == '__main__':\n parser = ArgumentParserRGBDSegmentation(description=\n 'Efficient RGBD Indoor Sematic Segmentation (Inference)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data_root', type=str, default=HOME +\n '/bags/june_25th/kinect_rgbd_person_act', help=\n 'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'\n )\n parser.set_common_args()\n parser.add_argument('--ckpt_path', type=str, default=\n 'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=\n 'Path to the checkpoint of the trained model.')\n parser.add_argument('--depth_scale', type=float, default=1.0, help=\n 'Additional depth scaling factor to apply.')\n args = parser.parse_args()\n args.pretrained_on_imagenet = False\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,\n loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print('Loaded checkpoint from {}'.format(args.ckpt_path))\n model.eval()\n model.to(device)\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))\n assert args.modality == 'rgbd', 'Only RGBD inference supported so far'\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n for fp_rgb, fp_depth in filepaths:\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale\n h, w, _ = img_rgb.shape\n sample = preprocessor({'image': img_rgb, 'depth': img_depth})\n image = sample['image'][None].to(device)\n depth = sample['depth'][None].to(device)\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False\n )\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap='gray')\n axs[2].imshow(pred_colored)\n plt.suptitle(\n f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'\n )\n plt.show()\n",
"step-4": "<mask token>\nimport argparse\nfrom glob import glob\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nfrom src.args import ArgumentParserRGBDSegmentation\nfrom src.build_model import build_model\nfrom src.prepare_data import prepare_data\nHOME = os.environ['HOME']\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == '__main__':\n parser = ArgumentParserRGBDSegmentation(description=\n 'Efficient RGBD Indoor Sematic Segmentation (Inference)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data_root', type=str, default=HOME +\n '/bags/june_25th/kinect_rgbd_person_act', help=\n 'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'\n )\n parser.set_common_args()\n parser.add_argument('--ckpt_path', type=str, default=\n 'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=\n 'Path to the checkpoint of the trained model.')\n parser.add_argument('--depth_scale', type=float, default=1.0, help=\n 'Additional depth scaling factor to apply.')\n args = parser.parse_args()\n args.pretrained_on_imagenet = False\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,\n loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print('Loaded checkpoint from {}'.format(args.ckpt_path))\n model.eval()\n model.to(device)\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))\n assert args.modality == 'rgbd', 'Only RGBD inference supported so far'\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n for fp_rgb, fp_depth in filepaths:\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale\n h, w, _ = img_rgb.shape\n sample = preprocessor({'image': img_rgb, 'depth': img_depth})\n image = sample['image'][None].to(device)\n depth = sample['depth'][None].to(device)\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False\n )\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap='gray')\n axs[2].imshow(pred_colored)\n plt.suptitle(\n f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'\n )\n plt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n.. codeauthor:: Daniel Seichter <[email protected]>\n\"\"\"\nimport argparse\nfrom glob import glob\nimport os\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\n\nfrom src.args import ArgumentParserRGBDSegmentation\nfrom src.build_model import build_model\nfrom src.prepare_data import prepare_data\n\nHOME = os.environ[\"HOME\"]\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == \"__main__\":\n # arguments\n parser = ArgumentParserRGBDSegmentation(\n description=\"Efficient RGBD Indoor Sematic Segmentation (Inference)\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--data_root\",\n type=str,\n default=HOME + \"/bags/june_25th/kinect_rgbd_person_act\",\n help=\"Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth\",\n )\n parser.set_common_args()\n parser.add_argument(\n \"--ckpt_path\",\n type=str,\n default=\"trained_models/nyuv2/r34_NBt1D_scenenet.pth\",\n # default=\"trained_models/sunrgbd/r34_NBt1D.pth\",\n help=\"Path to the checkpoint of the trained model.\",\n )\n parser.add_argument(\n \"--depth_scale\",\n type=float,\n default=1.0,\n help=\"Additional depth scaling factor to apply.\",\n )\n args = parser.parse_args()\n\n # dataset\n # TODO: set dataset to be sunrgbd\n # args.dataset = \"sunrgbd\"\n args.pretrained_on_imagenet = False # we are loading other weights anyway\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n\n # model and checkpoint loading\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\"Loaded checkpoint from {}\".format(args.ckpt_path))\n\n model.eval()\n model.to(device)\n\n # get samples\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, \"color/*.jpg\")))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, \"depth/*.png\")))\n assert args.modality == \"rgbd\", \"Only RGBD inference supported so far\"\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n\n # inference\n for fp_rgb, fp_depth in filepaths:\n # load sample\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype(\"float32\") * args.depth_scale\n h, w, _ = img_rgb.shape\n\n # preprocess sample\n sample = preprocessor({\"image\": img_rgb, \"depth\": img_depth})\n\n # add batch axis and copy to device\n image = sample[\"image\"][None].to(device)\n depth = sample[\"depth\"][None].to(device)\n\n # apply network\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode=\"bilinear\", align_corners=False)\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n\n # show result\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap=\"gray\")\n axs[2].imshow(pred_colored)\n\n plt.suptitle(\n f\"Image: ({os.path.basename(fp_rgb)}, \"\n f\"{os.path.basename(fp_depth)}), Model: {args.ckpt_path}\"\n )\n # plt.savefig('./result.jpg', dpi=150)\n plt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
class simple_nn():
    '''
    A simple neural network class implementing a 3-layer NN. In this class an additional layer
    was added to the original layers from the notebook given by Julian Stier and Sahib Julka.
    Moreover, those functions were refactored so that the final class is more concise
    and easier to read.
    Additionally, the class was adapted to multi-class classification tasks (i.e. more than 2 classes).
    -----------------------------------------------------------------------------------------------
    OUTPUT:
    weights that must be used to call the predict method of the class
    loss_res - list that contains the loss values calculated during the training steps
    accuracy_res - list that contains the accuracy values calculated during the training steps
    -----------------------------------------------------------------------------------------------
    INPUT:
    creating a class instance:

    simple_nn(input_dim, output_dim, lr, num_epochs, decay_rate)

    where: input_dim - input dimension of the NN,
           output_dim - output dimension of the NN,
           lr - learning rate,
           num_epochs - number of epochs to iterate over,
           decay_rate - decay rate for the learning rate
    For example:
    model = simple_nn(2, 2, 0.01, 2, 0.5)

    Once the model is initialized, we can call the train method
    train(x, y, nn_hdim, batch_size)
    where: x, y are self-explanatory,
           nn_hdim - number of neurons in the hidden layer,
           batch_size - size of the batch which will be used to split the data in each epoch

    For example:
    weights, loss_res, accuracy_res = model.train(X_train, y_train, 10, batch_size=50)
    ---------------------------------------------------------------------------------------
    PREDICT:
    Once the model is trained it returns the weights, also called the "model".
    Having the weights and x is sufficient to run prediction with the simple NN.
    Prediction returns the predicted classes for the given inputs:

    y_hat = model.predict(weights, X_test)
    '''
def __init__(self, nn_input_dim, nn_output_dim, lr, epochs, decay_rate):
self.nn_input_dim = nn_input_dim # input layer dimensionality
self.nn_output_dim = nn_output_dim # output layer dimensionality
self.lr_init = lr # learning rate for gradient descent
self.epochs = epochs
        self.decay_rate = decay_rate  # decay rate for calculating the learning rate decay
self.reg_lambda = 0.01 # regularization strength
def init_weights(self, nn_hdim):
np.random.seed(0)
        # when we initialize weights we normalise them by sqrt(n of inputs),
        # which has been empirically shown to improve the rate of convergence
self.W1 = np.random.rand(self.nn_input_dim, nn_hdim)/ np.sqrt(self.nn_input_dim)
self.b1 = np.random.rand(1, nn_hdim)
self.W2 = np.random.rand(nn_hdim, nn_hdim)/ np.sqrt(nn_hdim)
self.b2 = np.random.rand(1, nn_hdim)
# W3 and b3 are added as here we are having +1 layer
self.W3 = np.random.rand(nn_hdim, self.nn_output_dim)/ np.sqrt(nn_hdim)
self.b3 = np.random.rand(1, self.nn_output_dim)
return self.W1, self.b1, self.W2, self.b2, self.W3, self.b3
# sigmoid and sigmoid derivative have been added to this NN
def sigmoid(self, x):
return 1/(1+np.exp(-x))
def sigmoid_deriv(self, x):
f = 1/(1+np.exp(-x))
df = f * (1 - f)
return df
def softmax(self, x):
exp_scores = np.exp(x)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return probs
def tanh_deriv(self, x):
return 1 - np.power(x, 2)
def lr_decay(self, epoch):
lr = self.lr_init/(1+self.decay_rate * epoch)
return lr
def forward_prop(self, W1, b1, W2, b2, W3, b3, x):
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
# layer 2 was added, i.e z2 and a2
z2 = a1.dot(W2) + b2
a2 = self.sigmoid(z2)
z3 = a2.dot(W3) + b3
a3 = self.softmax(z3)
return z1, a1, z2, a2, z3, a3
def backward_prop(self, z1, a1, z2, a2, z3, a3, W1, W2, W3, x, y):
        delta4 = a3
        # delta4 is the output-layer error that we back-propagate to W3, W2, W1.
        # For softmax + cross-entropy, dL/dz3 = a3 - onehot(y), i.e. subtract 1
        # from the predicted probability of the true class:
        delta4[range(self.batch_size), y] -= 1
dW3 = (a2.T).dot(delta4)
db3 = np.sum(delta4, axis=0, keepdims=True)
        # delta3 = (error propagated through W3) * derivative of the sigmoid.
        # sigmoid_deriv expects the pre-activation, so pass z2 (not a2).
        delta3 = delta4.dot(W3.T) * self.sigmoid_deriv(z2)
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
        # tanh_deriv is written in terms of the activation: since a1 = tanh(z1),
        # d(tanh)/dz1 = 1 - tanh(z1)^2 = 1 - a1^2, so passing a1 is correct here.
        delta2 = delta3.dot(W2.T) * self.tanh_deriv(a1)
dW1 = np.dot(x.T, delta2)
db1 = np.sum(delta2, axis=0)
return dW1, db1, dW2, db2, dW3, db3
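
    # Summary of the gradients computed in backward_prop above (illustrative
    # notation; onehot(y) denotes the one-hot encoding of the labels):
    #   delta4 = a3 - onehot(y)                       # dL/dz3 (softmax + CE)
    #   dW3 = a2^T delta4,      db3 = sum(delta4)
    #   delta3 = (delta4 W3^T) * a2 * (1 - a2)        # sigmoid'
    #   dW2 = a1^T delta3,      db2 = sum(delta3)
    #   delta2 = (delta3 W2^T) * (1 - a1^2)           # tanh'
    #   dW1 = x^T delta2,       db1 = sum(delta2)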
def params_update(self, W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3):
dW3 += self.reg_lambda * W3
dW2 += self.reg_lambda * W2
dW1 += self.reg_lambda * W1
W1 += -self.lr * dW1
b1 += -self.lr * db1
W2 += -self.lr * dW2
b2 += -self.lr * db2
W3 += -self.lr * dW3
b3 += -self.lr * db3
return W1, b1, W2, b2, W3, b3
def train(self, X, y, nn_hdim, batch_size):
# Initialize the parameters to random values. We need to learn these.
W1, b1, W2, b2, W3, b3 = self.init_weights(nn_hdim)
self.batch_size = batch_size
loss_res = []
accuracy_res = []
# This is what we return at the end
self.model = {}
# defining number of batches
num_batches = X.shape[0]//self.batch_size
# Gradient descent
for epoch in range(0, self.epochs):
print('epochs', epoch)
if epoch == 0:
self.lr = self.lr_init
else:
self.lr = self.lr_decay(epoch)
for batch_num in range(num_batches):
print('batch_num', batch_num)
# slicing batch data
start = batch_num * self.batch_size
end = (batch_num + 1) * self.batch_size
self.x_batched = X[start:end]
self.y_batched = np.array(y[start:end])
                # train the model: forward propagation, backward propagation, then weight update
z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, self.x_batched)
dW1, db1, dW2, db2, dW3, db3 = self.backward_prop(z1, a1, z2, a2, z3, a3, W1, W2, W3, self.x_batched, self.y_batched)
W1, b1, W2, b2, W3, b3 = self.params_update(W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3)
# Assign new parameters to the model
self.model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}
# IMPORTANT
# to compute loss value and accuracy we should use new weights and the same batch of x and y data
loss, acc = self.metrics(W1, W2, W3, b1, b2, b3, self.x_batched, self.y_batched)
loss_res.append(loss)
accuracy_res.append(acc)
return self.model, loss_res, accuracy_res
def metrics(self, W1, W2, W3, b1, b2, b3, X, y):
z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, X)
loss = self.calculate_loss(a3, y, W1, W2, W3)
acc = self.calculate_accuracy(a3, y)
return loss, acc
def calculate_loss(self, a3, y, W1, W2, W3):
        correct_logprobs = -np.log(a3[range(self.batch_size), y])
        data_loss = np.sum(correct_logprobs)
        # Add regularization term to the loss (optional)
        data_loss += self.reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
#print('loss a2',1./self.batch_size * data_loss)
return 1./self.batch_size * data_loss
    def calculate_accuracy(self, a3, y_true):
        y_hat = np.argmax(a3, axis=1)
        correct = sum(y_true == y_hat)
        # return accuracy as a percentage
        return correct / len(y_true) * 100
def predict(self, model, x):
W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'], model['W3'], model['b3']
# Forward propagation
z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, x)
return np.argmax(a3, axis=1)
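
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original class). The
# synthetic data and variable names below are assumptions for the example.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_train = rng.randn(200, 2)                                  # 200 samples, 2 features
    y_train = (X_train[:, 0] + X_train[:, 1] > 0).astype(int)    # 2 classes

    model = simple_nn(2, 2, 0.01, 2, 0.5)   # in_dim, out_dim, lr, epochs, decay_rate
    weights, loss_res, accuracy_res = model.train(X_train, y_train, 10, batch_size=50)

    y_hat = model.predict(weights, X_train)
    print('final batch accuracy:', accuracy_res[-1])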
|
normal
|
{
"blob_id": "cdc32e7c767097a0eb0def71e55f0276982d6a96",
"index": 5235,
"step-1": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[19]:\n\n\nimport numpy as np\nimport pandas as pd\n\nclass simple_nn():\n '''\n This is simple nn class with 3 layers NN. In this class additional layer was added to the original layers\n from notebook given by Julian Stier and Sahib Julka.\n Moreover those functions were refactored so that final class would look more concise\n and easier to read. \n Additionaly optimization were done to work with multiclassification tasks (i.e > than 2 classes)\n -----------------------------------------------------------------------------------------------\n OUTPUT:\n weights that must be used to call predict method of the class\n loss_res - list that consist of loss value calculated during training steps\n accuracy_res - list that consist of accuracy value calculated during training steps \n -----------------------------------------------------------------------------------------------\n INPUT:\n creating a class examplar:\n \n simple_nn(input_dim, output_dim, lr, num_epochs, decay_rate)\n \n where: input_dim - input dimention of NN , \n output_dim - output dimention of NN, \n lr -learnin rate, \n num_epochs - number of epochs to iterate over \n decay_rate - decay rate for learning rate \n For example: \n model = simple_nn(2, 2, 0.01, 2, 0.5)\n \n Once model is initialized, we can call train method \n train(x, y, nn_hdim, batch_size)\n where: x, y are self-explanatory, \n nn_hdim - num of neurons in hidden layer,\n batch_size - size of batch wich will be used to split the data in each epoch\n \n For example: \n weights, loss_res, accuracy_res = model.train(X_train, y_train, 10, batch_size=50)\n ---------------------------------------------------------------------------------------\n PREDICT:\n Once model is trained it will return weights or also called \"model\".\n Having weights and x is sufficient to execute prediction with simple NN.\n Prediction will return predicted classes for the given inputs:\n \n y_hat = model.predict(weights, X_test) \n '''\n \n def __init__(self, nn_input_dim, nn_output_dim, lr, epochs, decay_rate):\n \n self.nn_input_dim = nn_input_dim # input layer dimensionality\n self.nn_output_dim = nn_output_dim # output layer dimensionality\n \n self.lr_init = lr # learning rate for gradient descent\n self.epochs = epochs\n self.decay_rate = decay_rate # decay rate for calculating learninng rate decay \n self.reg_lambda = 0.01 # regularization strength\n\n def init_weights(self, nn_hdim):\n np.random.seed(0)\n # when we initialize weights we normalise them by sqrt(n of input)\n # that has been empirically proved to improve the rate of convergence \n \n self.W1 = np.random.rand(self.nn_input_dim, nn_hdim)/ np.sqrt(self.nn_input_dim)\n self.b1 = np.random.rand(1, nn_hdim)\n self.W2 = np.random.rand(nn_hdim, nn_hdim)/ np.sqrt(nn_hdim)\n self.b2 = np.random.rand(1, nn_hdim)\n \n # W3 and b3 are added as here we are having +1 layer \n self.W3 = np.random.rand(nn_hdim, self.nn_output_dim)/ np.sqrt(nn_hdim)\n self.b3 = np.random.rand(1, self.nn_output_dim) \n \n return self.W1, self.b1, self.W2, self.b2, self.W3, self.b3\n \n # sigmoid and sigmoid derivative have been added to this NN\n def sigmoid(self, x):\n return 1/(1+np.exp(-x))\n \n def sigmoid_deriv(self, x):\n f = 1/(1+np.exp(-x))\n df = f * (1 - f)\n return df\n \n def softmax(self, x):\n exp_scores = np.exp(x)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n return probs\n \n def tanh_deriv(self, x):\n return 1 - np.power(x, 2)\n \n def lr_decay(self, 
epoch):\n lr = self.lr_init/(1+self.decay_rate * epoch)\n return lr\n \n def forward_prop(self, W1, b1, W2, b2, W3, b3, x):\n # Forward propagation\n z1 = x.dot(W1) + b1\n a1 = np.tanh(z1)\n \n # layer 2 was added, i.e z2 and a2\n z2 = a1.dot(W2) + b2\n a2 = self.sigmoid(z2) \n \n z3 = a2.dot(W3) + b3\n a3 = self.softmax(z3)\n\n return z1, a1, z2, a2, z3, a3\n \n def backward_prop(self, z1, a1, z2, a2, z3, a3, W1, W2, W3, x, y):\n \n delta4 = a3 \n # so delta 4 is error that we want to dissiminate to W3, W2, W1\n # assigning to errors -1 ?\n delta4[range(self.batch_size), y] -= 1\n \n dW3 = (a2.T).dot(delta4)\n db3 = np.sum(delta4, axis=0, keepdims=True)\n \n # delta3 = error * by W3 * by sigmoid derivative\n delta3 = delta4.dot(W3.T) * self.sigmoid_deriv(a2)\n \n dW2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3, axis=0, keepdims=True)\n \n # shouldn't we pass z1 to tanh_derivative? \n delta2 = delta3.dot(W2.T) * self.tanh_deriv(a1)\n \n dW1 = np.dot(x.T, delta2)\n db1 = np.sum(delta2, axis=0)\n \n return dW1, db1, dW2, db2, dW3, db3\n \n def params_update(self, W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3):\n \n dW3 += self.reg_lambda * W3\n dW2 += self.reg_lambda * W2\n dW1 += self.reg_lambda * W1\n \n W1 += -self.lr * dW1\n b1 += -self.lr * db1\n W2 += -self.lr * dW2\n b2 += -self.lr * db2\n W3 += -self.lr * dW3\n b3 += -self.lr * db3\n \n return W1, b1, W2, b2, W3, b3 \n \n def train(self, X, y, nn_hdim, batch_size):\n \n # Initialize the parameters to random values. We need to learn these.\n\n W1, b1, W2, b2, W3, b3 = self.init_weights(nn_hdim) \n self.batch_size = batch_size\n loss_res = []\n accuracy_res = []\n \n # This is what we return at the end\n self.model = {}\n \n # defining number of batches \n num_batches = X.shape[0]//self.batch_size\n \n # Gradient descent\n for epoch in range(0, self.epochs):\n \n print('epochs', epoch)\n if epoch == 0:\n self.lr = self.lr_init\n else:\n self.lr = self.lr_decay(epoch)\n \n for batch_num in range(num_batches):\n print('batch_num', batch_num)\n \n # slicing batch data\n start = batch_num * self.batch_size\n end = (batch_num + 1) * self.batch_size\n self.x_batched = X[start:end]\n self.y_batched = np.array(y[start:end])\n \n # training model by applying forward, backwar propagation and updating weithgs \n z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, self.x_batched)\n dW1, db1, dW2, db2, dW3, db3 = self.backward_prop(z1, a1, z2, a2, z3, a3, W1, W2, W3, self.x_batched, self.y_batched)\n W1, b1, W2, b2, W3, b3 = self.params_update(W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3)\n \n # Assign new parameters to the model\n self.model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}\n \n # IMPORTANT\n # to compute loss value and accuracy we should use new weights and the same batch of x and y data \n loss, acc = self.metrics(W1, W2, W3, b1, b2, b3, self.x_batched, self.y_batched)\n loss_res.append(loss)\n accuracy_res.append(acc)\n\n return self.model, loss_res, accuracy_res\n\n def metrics(self, W1, W2, W3, b1, b2, b3, X, y):\n \n z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, X)\n loss = self.calculate_loss(a3, y, W1, W2, W3)\n acc = self.calculate_accuracy(a3, y)\n return loss, acc\n \n def calculate_loss(self, a3, y, W1, W2, W3):\n\n corect_logprobs = -np.log(a3[range(self.batch_size), y])\n data_loss = np.sum(corect_logprobs)\n # Add regulatization term to loss (optional)\n data_loss += self.reg_lambda/2 * (np.sum(np.square(W1)) + 
np.sum(np.square(W2))+np.sum(np.square(W3)))\n #print('loss a2',1./self.batch_size * data_loss)\n return 1./self.batch_size * data_loss \n\n def calculate_accuracy(self, a3, y_true):\n\n y_hat = np.argmax(a3, axis=1)\n correct = sum(y_true == y_hat)\n incorrect = len(y_true) - correct\n return correct/len(y_true)*100\n \n def predict(self, model, x):\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'], model['W3'], model['b3']\n # Forward propagation\n z1, a1, z2, a2, z3, a3 = self.forward_prop(W1, b1, W2, b2, W3, b3, x)\n return np.argmax(a3, axis=1)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python3
import logging
import datetime
import os
import time
import json
import prod
import secret
from logging.handlers import RotatingFileHandler
import requests
import sns
from kafka import KafkaProducer
logger = logging.getLogger()
logger.setLevel('INFO')
log_path = os.path.basename(__file__).split('.')[0] + '.log'
handler = RotatingFileHandler(
log_path, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
class Producer():
def __init__(self, topic):
kafka_uname = os.environ['KAFKA_USERNAME']
kafka_pwd = os.environ['KAFKA_PASSWORD']
kafka_hosts = os.environ['KAFKA_HOSTS']
ssl_truststore_file = '/opt/scripts/ca-cert.cer'
self.topic_name = topic
self.producer = KafkaProducer(
bootstrap_servers=kafka_hosts,
acks=1,
compression_type='snappy',
retries=5,
linger_ms=200,
batch_size=1000,
request_timeout_ms=100000,
sasl_plain_username=kafka_uname,
sasl_plain_password=kafka_pwd,
security_protocol="SASL_SSL",
sasl_mechanism="PLAIN",
# sasl_mechanism="SCRAM-SHA-512",
ssl_cafile=ssl_truststore_file,
api_version=(0, 10, 1)
)
def produce_message(self, message):
self.producer.send(self.topic_name, message)
def close(self):
self.producer.flush()
self.producer.close()
logger.info('closed')
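
# Minimal usage sketch for Producer (illustrative; assumes KAFKA_USERNAME,
# KAFKA_PASSWORD and KAFKA_HOSTS are set in the environment, e.g. via
# set_creds() below, and that the truststore file exists):
#   producer = Producer(topic='some_topic')
#   producer.produce_message(json.dumps({'hello': 'world'}).encode())
#   producer.close()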
def set_creds():
secrets = secret.get_secret(
'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])
os.environ['KAFKA_USERNAME'] = secrets['username']
os.environ['KAFKA_PASSWORD'] = secrets['password']
os.environ['KAFKA_HOSTS'] = secrets["kafka_hosts"]
def run_kafka_producer_job(logs, topic_name):
set_creds()
producer = Producer(topic=topic_name)
logger.info('producer created')
try:
for l in logs:
to_send = json.dumps(l)
producer.produce_message(to_send.encode())
except Exception as e:
logger.info(f'Error gathering the file or producing to Kafka: {str(e)}')
raise e
finally:
producer.close()
def pull_pp_trap_logs(minutes_before):
logger.info('retrieving secrets for pp_trap')
current_time = datetime.datetime.utcnow()
if minutes_before > 0:
current_time = current_time - \
datetime.timedelta(minutes=minutes_before)
fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"
twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"
qs = {"created_after": twenty_minutes_ago, "created_before": fifteen_minutes_ago, "expand_events": "false"}
try:
r = requests.get('https://10.47.172.28/api/incidents', params=qs,
headers={'Authorization': prod.pp_trap_api_key}, verify=False)
print(r.status_code)
json_object = r.json()
print(json_object)
return json_object
except Exception as e:
sns.generate_sns("proofpoint_trap")
logger.error(f"Error for TRAP API call: {str(e)}")
if __name__ == "__main__":
minutes_before = 0 * 60
minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')
if os.path.exists(minutes_before_file):
with open(minutes_before_file, 'r') as minutes_file:
line = minutes_file.readline()
line = line.strip()
minutes_before = int(line)
while True:
"""
Query TRAP API (JSON format) starting from minutes_before
send logs to kafka
reduce minutes_before in next iteration and repeat
when iteration reaches now -20 minutes
run the job once every 5 minutes
"""
logger.info(f'minutes before: {minutes_before}')
if minutes_before <= 0:
logger.info('waiting for 5 minutes')
time.sleep(300)
logger.info('TRAP query started')
logs = pull_pp_trap_logs(minutes_before)
logger.info('TRAP query finished')
minutes_before = minutes_before - 5
if logs:
logger.info('TRAP_produce started')
run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')
logger.info('TRAP_produce finished')
else:
logger.info("No logs for TRAP call.")
with open(minutes_before_file, 'w') as minutes_file:
minutes_before = 0 if minutes_before < 0 else minutes_before
minutes_file.write(str(minutes_before))
|
normal
|
{
"blob_id": "283b93437072f0fd75d75dab733ecab05dc9e1f3",
"index": 3872,
"step-1": "<mask token>\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger.setLevel('INFO')\n<mask token>\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\nif __name__ == '__main__':\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs,\n 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n 
else:\n logger.info('No logs for TRAP call.')\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))\n",
"step-4": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\nlog_path = os.path.basename(__file__).split('.')[0] + '.log'\nhandler = RotatingFileHandler(log_path, maxBytes=1000000, backupCount=5)\nformatter = logging.Formatter(\n '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s')\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\nif __name__ == '__main__':\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n logger.info('TRAP query started')\n logs = 
pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs,\n 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info('No logs for TRAP call.')\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))\n",
"step-5": "#!/usr/bin/env python3\nimport logging\nimport datetime\nimport os\nimport time\nimport json\n\nimport prod\nimport secret\nfrom logging.handlers import RotatingFileHandler\nimport requests\nimport sns\nfrom kafka import KafkaProducer\n\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\nlog_path = os.path.basename(__file__).split('.')[0] + '.log'\n\nhandler = RotatingFileHandler(\n log_path, maxBytes=1000000, backupCount=5)\nformatter = logging.Formatter(\n \"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer():\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n\n self.topic_name = topic\n\n self.producer = KafkaProducer(\n bootstrap_servers=kafka_hosts,\n acks=1,\n compression_type='snappy',\n retries=5,\n linger_ms=200,\n batch_size=1000,\n request_timeout_ms=100000,\n sasl_plain_username=kafka_uname,\n sasl_plain_password=kafka_pwd,\n security_protocol=\"SASL_SSL\",\n sasl_mechanism=\"PLAIN\",\n # sasl_mechanism=\"SCRAM-SHA-512\",\n ssl_cafile=ssl_truststore_file,\n api_version=(0, 10, 1)\n )\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret(\n 'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets[\"kafka_hosts\"]\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}')\n raise e\n\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - \\\n datetime.timedelta(minutes=minutes_before)\n\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + \"Z\"\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + \"Z\"\n\n qs = {\"created_after\": twenty_minutes_ago, \"created_before\": fifteen_minutes_ago, \"expand_events\": \"false\"}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n\n json_object = r.json()\n print(json_object)\n return json_object\n\n except Exception as e:\n sns.generate_sns(\"proofpoint_trap\")\n logger.error(f\"Error for TRAP API call: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before 
in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info(\"No logs for TRAP call.\")\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
# Generated by Django 3.0.4 on 2020-03-29 09:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio_app', '0008_feedback_product'),
]
operations = [
migrations.RemoveField(
model_name='feedback',
name='date',
),
migrations.RemoveField(
model_name='feedback',
name='product',
),
]
|
normal
|
{
"blob_id": "11ad3e1ab4ffd491e27998a7235b7e18857632ed",
"index": 3141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('portfolio_app', '0008_feedback_product')]\n operations = [migrations.RemoveField(model_name='feedback', name='date'\n ), migrations.RemoveField(model_name='feedback', name='product')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('portfolio_app', '0008_feedback_product')]\n operations = [migrations.RemoveField(model_name='feedback', name='date'\n ), migrations.RemoveField(model_name='feedback', name='product')]\n",
"step-5": "# Generated by Django 3.0.4 on 2020-03-29 09:27\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('portfolio_app', '0008_feedback_product'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='feedback',\r\n name='date',\r\n ),\r\n migrations.RemoveField(\r\n model_name='feedback',\r\n name='product',\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function, unicode_literals
import sys
import ast
import os
import tokenize
import warnings
from io import StringIO
def interleave(inter, f, seq):
"""Call f on each item in seq, calling inter() in between.
"""
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)
class CodeGenException(Exception):
""" Generic exception for errors raised in code generation """
pass
class CodeGenerator:
"""Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarded. """
# represents built in functions
pythonbuiltins = ["abs", "float", "int"]
# basic types
basic_arg_types = ['float', 'int']
    # supported math constants
mathconsts = {"pi": "M_PI",
"e": "M_E",
"inf": "INFINITY",
"nan": "NAN",
}
# support for most numpy types except complex numbers and float>64bit
numpytypes = {"byte": "char",
"ubyte": "unsigned char",
"short": "short",
"ushort": "unsigned short",
"intc": "int",
"uintc": "unsigned int",
"uint": "unisgned int",
"longlong": "long long",
"ulonglong": "unsigned long long",
"half": "half", # cuda supported
"single": "float",
"double": "double",
"longdouble": "long double",
"bool_": "bool",
"bool8": "bool",
# sized aliases
"int_": "long",
"int8": "int8_t",
"int16": "int16_t",
"int32": "int32_t",
"int64": "int64_t",
"intp": "intptr_t",
"uint_": "long",
"uint8": "uint8_t",
"uint16": "uint16_t",
"uint32": "uint32_t",
"uint64": "uint64_t",
"uintp": "uintptr_t",
"float_": "float",
"float16": "half",
"float32": "float",
"float64": "double"
}
# getVariableType and setVariableType functions are added dynamically
fgpu_funcs = [ "getID", "getStepCounter", "getIndex" ]
fgpu_attrs = ["ALIVE", "DEAD"]
fgpu_input_msg_funcs = ["radius", "at"] # functions that can be called on message_in that do NOT return iterators
fgpu_input_msg_iter_funcs = ["wrap", "vn", "vn_wrap"] # functions that can be called on message_in that do return iterators
fgpu_input_msg_iter_var_funcs = ["getIndex", "getVirtualX", "getVirtualY", "getVirtualZ"]
fgpu_output_msg_funcs = ["setLocation", "setKey", "setIndex"]
fgpu_agent_out_msg_funcs = ["getID"]
fgpu_env_funcs = ["containsProperty", "containsMacroProperty"]
fgpu_env_macro_funcs = ["exchange", "CAS", "min", "max"]
fgpu_rand_funcs = []
fgpu_message_types = ["pyflamegpu.MessageNone", "pyflamegpu.MessageBruteForce", "pyflamegpu.MessageBucket", "pyflamegpu.MessageSpatial2D", "pyflamegpu.MessageSpatial3D", "pyflamegpu.MessageArray", "pyflamegpu.MessageArray2D", "pyflamegpu.MessageArray3D"]
_fgpu_types = {"Float": "float",
"Double": "double",
"Int": "int",
"UInt": "unsigned int",
"Int8": "int_8",
"UInt8": "uint_8",
"Char": "char",
"UChar": "unsigned char",
"Int16": "int_16",
"UInt16": "uint_16",
"Int32": "int_32",
"UInt32": "uint_32",
"Int64": "int_64",
"UInt64": "uint_64"
}
def __init__(self, tree, file = sys.stdout):
"""CodeGenerator(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self.future_imports = []
self._indent = 0
        # list of locals used to determine if a variable already exists in assignments
self._locals = ["pyflamegpu"]
self._device_functions = []
self._message_iterator_var = None # default
self._input_message_var = 'message_in' # default
self._output_message_var = 'message_out' # default
self.dispatch(tree)
print("", file=self.f)
self.f.flush()
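
    # Minimal usage sketch (illustrative; assumes `source` holds the Python
    # source of a FLAME GPU agent function):
    #   tree = ast.parse(source)
    #   CodeGenerator(tree, file=sys.stdout)   # writes the translated C++ code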
def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):
"""
Gets the device function name by translating a typed Python version to a templated cpp version.
Python functions looks like getVariableFloatArray6 and translate to getVariable<float, 6>
This function will detect and test against a set of known types and also extract the Array length
This function returns None if the string is invalid in format but only throws an error if the format is correct but the type is invalid.
"""
cpp_func_name = ""
py_func = tree.attr
# extract function name start
for prefix in permitted_prefixes:
if py_func.startswith(prefix):
cpp_func_name = prefix
py_func = py_func[len(prefix):]
                break  # don't allow the else
else:
return None
# check type and lengths
if allow_lengths:
            # split to get type and Array length (this could **potentially** be looked up from the model description but current syntax is consistent with swig bindings)
type_and_length = py_func.split("Array")
if type_and_length[0] not in self._fgpu_types:
self.RaiseError(tree, f"'{type_and_length[0]}' is not a valid FLAME GPU type")
t = self._fgpu_types[type_and_length[0]]
# generate template args
if (len(type_and_length) == 1):
cpp_func_name += f"<{t}>"
elif (len(type_and_length) == 2):
cpp_func_name += f"<{t}, {type_and_length[1]}>"
else:
return None
else:
if py_func not in self._fgpu_types:
self.RaiseError(tree, f"'{py_func}' is not a valid FLAME GPU type")
t = self._fgpu_types[py_func]
cpp_func_name += f"<{t}>"
# return
return cpp_func_name
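
    # Examples of the translation performed by _deviceVariableFunctionName
    # (illustrative):
    #   getVariableFloat        -> getVariable<float>
    #   getVariableFloatArray6  -> getVariable<float, 6>
    #   fooBar                  -> None (no permitted prefix)
    #   getVariableBanana       -> CodeGenException ('Banana' is not a FLAME GPU type)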
def fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
self.f.write("\n"+" "*self._indent + text)
def write(self, text):
"Append a piece of text to the current line."
self.f.write(str(text))
def enter(self):
"Print '{', and increase the indentation."
self.write("{")
self._indent += 1
def leave(self):
"Decrease the indentation level and Print '}'"
self._indent -= 1
self.fill("}")
def dispatch(self, tree):
"Dispatcher function, dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
meth(tree)
def RaiseWarning(self, tree, str):
warnings.warn(f"Warning ({tree.lineno}, {tree.col_offset}): {str}")
def RaiseError(self, tree, str):
raise CodeGenException(f"Error ({tree.lineno}, {tree.col_offset}): {str}")
    ############### Custom Unparsing methods ###############
# These are special versions of the ast unparsing #
# dispatch functions. #
########################################################
def dispatchMacroEnvFunction(self, tree, tree_parent):
"""
Function will handle a getMacroEnvironment function (assuming it is correctly formatted (by checking with _deviceVariableFunctionName first))
"""
cpp_func_name = "getMacroProperty"
py_func = tree.attr
# extract type from function name
py_type = py_func[len(cpp_func_name):]
if py_type not in self._fgpu_types:
self.RaiseError(tree, f"'{py_type}' is not a valid FLAME GPU type")
# get cpp type
t = self._fgpu_types[py_type]
cpp_func_name += f"<{t}"
        # inspect the parent Call to extract (and remove) the bound arguments so they don't end up in the argument list
if not tree_parent.args :
self.RaiseError(tree, f" Macro environment function '{py_func}' is expected to have some arguments.")
# if more than one arg then the rest are bounds to translate
if len(tree_parent.args) > 1:
bounds = tree_parent.args[1:]
# process bounds by appending to cpp function template arguments
for i in bounds:
if isinstance(i, ast.Num): # num required for python 3.7
if not isinstance(i.n, int):
self.RaiseError(tree, f" Macro environment function argument '{i}' should be an integer value.")
cpp_func_name += f", {i.n}"
else: # all Python > 3.7
if not isinstance(i, ast.Constant):
self.RaiseError(tree, f" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).")
if not isinstance(i.value, int):
self.RaiseError(tree, f" Macro environment function argument '{i}' should be an integer value.")
cpp_func_name += f", {i.value}"
# remove bounds from argument list (in place)
del tree_parent.args[1:]
cpp_func_name += ">"
self.write(cpp_func_name)
def dispatchFGPUFunctionArgs(self, tree):
"""
Handles arguments for a FLAME GPU function. Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`
Type hinting is required to translate a type into a FLAME GPU Message type implementation
"""
# reset the locals variable stack
self._locals = ["pyflamegpu"]
if len(tree.args.args) != 2:
self.RaiseError(tree, "Expected two FLAME GPU function arguments (input message and output message)")
# input message
if not tree.args.args[0].annotation:
self.RaiseError(tree.args.args[0], "Message input requires a supported type annotation")
if not isinstance(tree.args.args[0].annotation, ast.Attribute):
self.RaiseError(tree.args.args[0], "Message input type annotation should be an attribute of the form pyflamegpu.MessageType")
if not isinstance(tree.args.args[0].annotation.value, ast.Name):
self.RaiseError(tree.args.args[0], "Message output type annotation should be an attribute of the form pyflamegpu.MessageType")
input_message_attr = tree.args.args[0].annotation.value.id + "." + tree.args.args[0].annotation.attr
if input_message_attr not in self.fgpu_message_types:
self.RaiseError(tree.args.args[0], "Message input type annotation not a supported message type")
self._input_message_var = tree.args.args[0].arg # store the message input variable name
self.write(f"flamegpu::{tree.args.args[0].annotation.attr}") # requires namespace
self.write(", ")
# output message
if not tree.args.args[1].annotation:
self.RaiseError(tree.args.args[1], "Message output requires a supported type annotation")
if not isinstance(tree.args.args[1].annotation, ast.Attribute):
self.RaiseError(tree.args.args[1], "Message output type annotation should be an attribute of the form pyflamegpu.MessageType")
if not isinstance(tree.args.args[1].annotation.value, ast.Name):
self.RaiseError(tree.args.args[1], "Message output type annotation should be an attribute of the form pyflamegpu.MessageType")
output_message_attr = tree.args.args[1].annotation.value.id + "." + tree.args.args[1].annotation.attr
if output_message_attr not in self.fgpu_message_types:
self.RaiseError(tree.args.args[1], "Message output type annotation not a supported message type")
self._output_message_var = tree.args.args[1].arg # store the message output variable name
self.write(f"flamegpu::{tree.args.args[1].annotation.attr}") # requires namespace
def dispatchType(self, tree):
"""
There is a limited set of types and formats of type description supported. Types can be either;
1) A python built in type of int or float, or
2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16
        This function translates types and catches unsupported ones, but does not translate a function call (i.e. a cast)
"""
if isinstance(tree, ast.Name):
if tree.id not in self.basic_arg_types:
self.RaiseError(tree, "Not a supported type")
self.write(tree.id)
elif isinstance(tree, ast.Attribute):
if not isinstance(tree.value, ast.Name) :
self.RaiseError(tree, "Not a supported type")
if not (tree.value.id == "numpy" or tree.value.id == "np"):
self.RaiseError(tree, "Not a supported type")
if tree.attr not in self.numpytypes:
self.RaiseError(tree, "Not a supported numpy type")
self.write(self.numpytypes[tree.attr])
else:
self.RaiseError(tree, "Not a supported type")
def dispatchFGPUDeviceFunctionArgs(self, tree):
"""
Handles arguments for a FLAME GPU device function. Arguments must use type hinting to be translated to cpp.
"""
# reset the locals variable stack
self._locals = ["pyflamegpu"]
# input message
first = True
annotation = None
for arg in tree.args.args:
# ensure that there is a type annotation
if not arg.annotation:
self.RaiseError(arg, "Device function argument requires type annotation")
# comma if not first
if not first:
self.write(", ")
self.dispatchType(arg.annotation)
self.write(f" {arg.arg}")
# add arg to local variable stack
self._locals.append(arg.arg)
first = False
def dispatchMessageIteratorCall(self, tree):
"""
        A message iterator call may be a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap()).
        Using this function avoids the generic call handler, which may accept member function calls on things that are not iterators.
"""
# simple case not a member function just an iterator with arguments
if isinstance(tree.func, ast.Name):
self.write(f"FLAMEGPU->{tree.func.id}")
if isinstance(tree.func, ast.Attribute) :
if isinstance(tree.func.value, ast.Name):
# check that the iterator is supported
if not tree.func.attr in self.fgpu_input_msg_iter_funcs:
self.RaiseError(tree, f"Message input loop iterator '{tree.func.attr}' is not supported.")
self.write(f"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}")
else:
self.RaiseError(tree, "Message input loop iterator format incorrect.")
# handle function arguments
self.write("(")
self._CallArguments(tree)
self.write(")")
def dispatchMessageLoop(self, tree):
"""
        This is a special case of a range-based for loop in which the iterator item returns a const reference to the message.
        Any user specified message variable name can be used.
"""
self.fill("for (const auto& ")
self.dispatch(tree.target)
self.write(" : ")
# if simple message iterator
if isinstance(tree.iter, ast.Name):
if not tree.iter.id == self._input_message_var:
self.RaiseError(t, f"Message input loop requires use of '{self._input_message_var}' as iterator.")
# write with prefix
self.write(f"FLAMEGPU->{self._input_message_var}")
# if it is a call then handle the different cases
elif isinstance(tree.iter, ast.Call):
self.dispatchMessageIteratorCall(tree.iter)
#otherwise not supported
else :
self.RaiseError(tree, f"Message input loop iterator in unsupported format")
self.write(")")
self._message_iterator_var = tree.target.id
self.enter()
self.dispatch(tree.body)
self.leave()
self._message_iterator_var = None
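
    # Example translation performed by dispatchMessageLoop (illustrative):
    #   Python:  for msg in message_in: ...
    #   C++:     for (const auto& msg : FLAMEGPU->message_in) { ... }
    #   Python:  for msg in message_in.wrap(x, y): ...
    #   C++:     for (const auto& msg : FLAMEGPU->message_in.wrap(x, y)) { ... }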
def dispatchMemberFunction(self, t, t_parent):
"""
A very limited set of function calls to members are supported so these are fully evaluated here.
t_parent is the Call ast object required if the argument need to be modified (i.e. in the case of macro environment properties)
Function calls permitted are;
* pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). This will be translated into a typed Cpp call.
* message_input.function - a call to the message input variable (the name of which is specified in the function definition)
        * msg.function - a call to the message input iterator object variable (the name of which is specified in the message loop)
* message_output.function - a call to the message output variable (the name of which is specified in the function definition)
* pyflamegpu.environment.function - the only nested attribute type. This will be translated into a typed Cpp call.
* math.function - Any function calls from python `math` are translated to calls raw function calls. E.g. `math.sin()` becomes `sin()`
* numpy.type - Any numpy types are translated to static casts
"""
# it could be possible that the Call object has no value property e.g. a()()
if not hasattr(t, "value"):
self.RaiseError(t, f"Function call is in an unsupported format.")
# Nested member functions (e.g. x.y.z())
if isinstance(t.value, ast.Attribute):
# store some information about the source of this function call in parent as this may be useful for validation in whatever has called this function
t_parent.call_type = None
            # nested attributes are only supported on the pyflamegpu singleton (environment, random, agent_out)
if not isinstance(t.value.value, ast.Name):
self.RaiseError(t, "Unknown or unsupported nested attribute")
# pyflamegpu.environment
if t.value.value.id == "pyflamegpu" and t.value.attr == "environment":
# check it is a supported environment function
self.write("FLAMEGPU->environment.")
if t.attr in self.fgpu_env_funcs:
# proceed
self.write(t.attr)
else:
# simple getProperty type function
if t.attr.startswith('getProperty') :
# possible getter setter type function
py_func = self._deviceVariableFunctionName(t, ["getProperty"])
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' is not a supported pyflamegpu.environment property function.")
# write the getProperty type function
self.write(py_func)
t_parent.call_type = "Environment"
# need to catch case of getMacroProperty as arguments need to be translated into template parameters in cpp (and py_func can be ignored)
elif t.attr.startswith("getMacroProperty"):
# possible getter setter type function (Note: getMacroProperty only supports a subset of types but type checking is not performed. This is best left to the compiler.)
                        # do not permit lengths (e.g. Float4) as these will be passed as arguments
py_func = self._deviceVariableFunctionName(t, ["getMacroProperty"], allow_lengths=False)
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.")
# handle case
self.dispatchMacroEnvFunction(t, t_parent)
t_parent.call_type = "MacroEnvironment"
else:
self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.environment object")
# pyflamegpu.random
elif t.value.value.id == "pyflamegpu" and t.value.attr == "random":
# check it is a supported random function
self.write("FLAMEGPU->random.")
if t.attr in self.fgpu_rand_funcs:
# proceed
self.write(t.attr)
else:
# possible getter setter type function
py_func = self._deviceVariableFunctionName(t, ["uniform", "normal", "logNormal"], allow_lengths=False)
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.random object")
# proceed
self.write(py_func)
t_parent.call_type = "Random"
elif t.value.value.id == "pyflamegpu" and t.value.attr == "agent_out":
# check it is a supported agent_out function
self.write("FLAMEGPU->agent_out.")
if t.attr in self.fgpu_agent_out_msg_funcs:
# proceed
self.write(t.attr)
else:
# possible getter setter type function
py_func = self._deviceVariableFunctionName(t, ["setVariable"])
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu.agent_out object")
# proceed
self.write(py_func)
t_parent.call_type = "AgentOut"
else:
self.RaiseError(t, f"Unknown or unsupported nested attribute in {t.value.value.id}")
# Non nested member functions (e.g. x.y())
elif isinstance(t.value, ast.Name):
# pyflamegpu singleton
if t.value.id == "pyflamegpu":
# check for legit FGPU function calls
self.write("FLAMEGPU->")
if t.attr in self.fgpu_funcs:
# proceed
self.write(t.attr)
else:
# possible getter setter type function
py_func = self._deviceVariableFunctionName(t, ["getVariable", "setVariable"])
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' does not exist in pyflamegpu object")
# proceed
self.write(py_func)
            # message_in function using whatever variable was named in the function declaration (e.g. radius)
elif t.value.id == self._input_message_var:
# only process functions on message_in that are not iterators
if t.attr in self.fgpu_input_msg_funcs:
self.write(f"FLAMEGPU->{self._input_message_var}.")
self.write(t.attr)
else:
self.RaiseError(t, f"Message input variable '{self._input_message_var}' does not have a supported function '{t.attr}'")
# message input iterator arg
elif self._message_iterator_var and t.value.id == self._message_iterator_var:
self.write(f"{self._message_iterator_var}.")
# check for legit FGPU function calls and translate
if t.attr in self.fgpu_input_msg_iter_var_funcs:
# proceed
self.write(t.attr)
else:
# possible getter setter type function
py_func = self._deviceVariableFunctionName(t, ["getVariable"])
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object")
# proceed
self.write(py_func)
# message output arg
elif t.value.id == self._output_message_var:
# check for legit FGPU function calls and translate
self.write("FLAMEGPU->message_out.")
if t.attr in self.fgpu_output_msg_funcs:
# proceed
self.write(t.attr)
else:
# possible getter setter type function
py_func = self._deviceVariableFunctionName(t, ["setVariable"])
if not py_func:
self.RaiseError(t, f"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object")
# proceed
self.write(py_func)
# math functions (try them in raw function call format) or constants
elif t.value.id == "math":
self.write(t.attr)
# numpy types
elif t.value.id == "numpy" or t.value.id == "np":
if t.attr in self.numpytypes:
self.write(f"static_cast<{self.numpytypes[t.attr]}>")
else:
self.RaiseError(t, f"Unsupported numpy type {t.attr}")
# allow any call on any locals (too many cases to enforce without type checking)
elif t.value.id in self._locals:
self.write(f"{t.value.id}.{t.attr}")
else:
self.RaiseError(t, f"Global '{t.value.id}' identifier not supported")
        # A nested Call here can occur only on macro environment properties. E.g. 'pyflamegpu.environment.getMacroPropertyInt('a').exchange(10)'
elif isinstance(t.value, ast.Call):
# handle the call by recursively calling this function to do the depth first execution of pyflamegpu.environment.getMacroPropertyInt('a')
self.dispatchMemberFunction(t.value.func, t.value)
# check that the handler was actually for macro environment
            if t.value.call_type != "MacroEnvironment":
self.RaiseError(t, f"Function call {t.attr} is not supported")
            # now append the outer call by making sure the thing being called is a valid macro env function
            if t.attr not in self.fgpu_env_macro_funcs:
self.RaiseError(t, f"Function {t.attr} is not a valid macro environment function")
# write inner call args
self.write("(")
self._CallArguments(t.value)
self.write(")")
# write outer function (call args will be completed by _Call)
self.write(f".{t.attr}")
else:
self.RaiseError(t, "Unsupported function call syntax")
############### Unparsing methods ######################
# There should be one method per concrete grammar type #
# Constructors should be grouped by sum type. Ideally, #
# this would follow the order in the grammar, but #
# currently doesn't. #
########################################################
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
def _Interactive(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
def _Expression(self, tree):
self.dispatch(tree.body)
# stmt
def _Expr(self, tree):
"""
Same as a standard python expression but ends with semicolon
"""
# Catch odd case of multi line strings and doc strings which are Expr with a Constant string type value
if isinstance(tree.value, ast.Constant):
if isinstance(tree.value.value, str):
return
        # catch special case of Python 3.7 where the doc string is a Str and not a Constant
elif isinstance(tree.value, ast.Str):
return
# otherwise treat like a normal expression
self.fill()
self.dispatch(tree.value)
self.write(";")
def _NamedExpr(self, tree):
"""
        No such concept in C++, but assignment is itself an expression in C++ so a parenthesised standard assignment can be used in any location.
"""
self.write("(")
self.dispatch(tree.target)
self.write(" = ")
self.dispatch(tree.value)
self.write(")")
def _Import(self, t):
self.RaiseError(t, "Importing of modules not supported")
def _ImportFrom(self, t):
self.RaiseError(t, "Importing of modules not supported")
def _Assign(self, t):
"""
Assignment will use the auto type to define a variable at first use else will perform standard assignment.
Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.
        Multiple assignment is supported by cpp but not by the translator; nor is assignment to complex expressions, which is valid python syntax.
"""
if len(t.targets) > 1:
self.RaiseError(t, "Assignment to multiple targets not supported")
if not isinstance(t.targets[0], ast.Name):
self.RaiseError(t, "Assignment to complex expressions not supported")
self.fill()
# check if target exists in locals
        if t.targets[0].id not in self._locals:
self.write("auto ")
self._locals.append(t.targets[0].id)
self.dispatch(t.targets[0])
self.write(" = ")
self.dispatch(t.value)
self.write(";")
def _AugAssign(self, t):
"""
        Similar to assignment in terms of restrictions, e.g. only single named variable assignments are allowed.
Also requires the named variable to already exist in scope.
"""
if not isinstance(t.target, ast.Name):
self.RaiseError(t, "Augmented assignment to complex expressions not supported")
# check if target exists in locals
        if t.target.id not in self._locals:
self.RaiseError(t, "Augmented assignment not permitted on variables not already assigned previously")
self.fill()
self.dispatch(t.target)
self.write(" "+self.binop[t.op.__class__.__name__]+"= ")
self.dispatch(t.value)
self.write(";")
def _AnnAssign(self, t):
if not isinstance(t.target, ast.Name):
self.RaiseError(t, "Augmented assignment to complex expressions not supported")
self.fill()
self.dispatchType(t.annotation)
self.write(" ")
self.dispatch(t.target)
if t.value:
self.write(" = ")
self.dispatch(t.value)
self.write(";")
def _Return(self, t):
"""
Standard cpp like return with semicolon.
"""
self.fill("return")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(";")
def _Pass(self, t):
self.fill(";")
def _Break(self, t):
self.fill("break;")
def _Continue(self, t):
self.fill("continue;")
def _Delete(self, t):
self.RaiseError(t, "Deletion not supported")
def _Assert(self, t):
"""
        cassert does exist but is probably not required in FGPU functions, and it is unclear whether it is supported by jitify
"""
self.RaiseError(t, "Assert not supported")
def _Exec(self, t):
self.RaiseError(t, "Exec not supported")
def _Print(self, t):
"""
        This is old-school Python 2 printing, so there is no need to support it
"""
self.RaiseError(t, "Print not supported")
def _Global(self, t):
self.RaiseError(t, "Use of 'global' not supported")
def _Nonlocal(self, t):
self.RaiseError(t, "Use of 'nonlocal' not supported")
def _Await(self, t):
self.RaiseError(t, "Await not supported")
def _Yield(self, t):
self.RaiseError(t, "Yield not supported")
def _YieldFrom(self, t):
self.RaiseError(t, "Yield from not supported")
def _Raise(self, t):
"""
Exceptions are obviously supported in cpp but not in CUDA device code
"""
self.RaiseError(t, "Exception raising not supported")
def _Try(self, t):
self.RaiseError(t, "Exceptions not supported")
def _TryExcept(self, t):
self.RaiseError(t, "Exceptions not supported")
def _TryFinally(self, t):
self.RaiseError(t, "Exceptions not supported")
def _ExceptHandler(self, t):
self.RaiseError(t, "Exceptions not supported")
def _ClassDef(self, t):
self.RaiseError(t, "Class definitions not supported")
def _FunctionDef(self, t):
"""
        Checks the decorators of the function definition, which must be either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'.
Each is then processed in a different way using a specific dispatcher.
Function calls are actually checked and only permitted (or user defined) function calls are supported.
"""
self.write("\n")
# check decorators
if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):
self.RaiseError(t, "Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'")
# FLAMEGPU_AGENT_FUNCTION
if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':
if getattr(t, "returns", False):
self.RaiseWarning(t, "Function definition return type not supported on 'pyflamegpu.agent_function'")
self.fill(f"FLAMEGPU_AGENT_FUNCTION({t.name}, ")
self.dispatchFGPUFunctionArgs(t)
self.write(")")
# FLAMEGPU_DEVICE_FUNCTION
elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':
self.fill(f"FLAMEGPU_DEVICE_FUNCTION ")
if t.returns:
self.dispatchType(t.returns)
else:
self.write("void")
self.write(f" {t.name}(")
self.dispatchFGPUDeviceFunctionArgs(t)
self.write(")")
# add to list of defined functions that can be called
self._device_functions.append(t.name)
        # FLAMEGPU_AGENT_FUNCTION_CONDITION
elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':
# check for return annotation
if not hasattr(t, "returns"):
self.RaiseError(t, "Agent function conditions must have a 'bool' return type specified as a return type annotation")
# check for return annotation type
if not isinstance(t.returns, ast.Name):
self.RaiseError(t, "Agent function conditions return type must be 'bool'")
            if t.returns.id != 'bool':
self.RaiseError(t, "Agent function conditions return type must be 'bool'")
# check to ensure no arguments (discard any with a warning)
if t.args.args:
self.RaiseWarning(t, "Agent function conditions does not support arguments. These will be discarded.")
# write the agent function macro
self.fill(f"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})")
else:
self.RaiseError(t, "Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'")
self.enter()
self.dispatch(t.body)
self.leave()
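    # Illustrative translations (a sketch; MessageNone is assumed to be one of
    # the supported fgpu_message_types):
    #   @pyflamegpu.agent_function
    #   def step(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.MessageNone): ...
    #     ->   FLAMEGPU_AGENT_FUNCTION(step, flamegpu::MessageNone, flamegpu::MessageNone)
    #   @pyflamegpu.device_function
    #   def add(a: int, b: int) -> int: ...
    #     ->   FLAMEGPU_DEVICE_FUNCTION int add(int a, int b)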
def _AsyncFunctionDef(self, t):
self.RaiseError(t, "Async functions not supported")
def _For(self, t):
"""
        Two types of for loop are supported. Either;
        1) Message for loop in which case the format requires an iterator using the named pyflamegpu function argument of 'message_in'
        2) A range based for loop with 1 to 3 arguments which is converted into a c style loop
"""
# if message loop then process differently
if isinstance(t.iter, ast.Name):
if t.iter.id == self._input_message_var:
self.dispatchMessageLoop(t)
else:
self.RaiseError(t, "Range based for loops only support message iteration using 'message_in' iterator")
# do not support for else
elif t.orelse:
self.RaiseError(t, "For else not supported")
# allow calls but only to range function
elif isinstance(t.iter, ast.Call):
# simple function call e.g. message_in() or range()
if isinstance(t.iter.func, ast.Name):
# catch case of message_input with arguments (e.g. spatial messaging)
if t.iter.func.id == self._input_message_var:
self.dispatchMessageLoop(t)
# otherwise permit only range based for loops
elif t.iter.func.id == "range":
# switch on different uses of range based on number of arguments
if len(t.iter.args) == 1:
self.fill(f"for (int ")
self.dispatch(t.target)
self.write("=0;")
self.dispatch(t.target)
self.write("<")
self.dispatch(t.iter.args[0])
self.write(";")
self.dispatch(t.target)
self.write("++)")
elif len(t.iter.args) == 2:
self.fill(f"for (int ")
self.dispatch(t.target)
self.write("=")
self.dispatch(t.iter.args[0])
self.write(";")
self.dispatch(t.target)
self.write("<")
self.dispatch(t.iter.args[1])
self.write(";")
self.dispatch(t.target)
self.write("++)")
elif len(t.iter.args) == 3:
self.fill(f"for (int ")
self.dispatch(t.target)
self.write("=")
self.dispatch(t.iter.args[0])
self.write(";")
self.dispatch(t.target)
self.write("<")
self.dispatch(t.iter.args[1])
self.write(";")
self.dispatch(t.target)
self.write("+=")
self.dispatch(t.iter.args[2])
self.write(")")
else:
self.RaiseError(t, "Range based for loops requires use of 'range' function with arguments and not keywords")
self.enter()
self.dispatch(t.body)
self.leave()
else:
self.RaiseError(t, "Range based for loops only support calls to the 'range' function")
# member function call can only be on message_in.func() type call.
elif isinstance(t.iter.func, ast.Attribute):
# must be an attribute (e.g. calling a member of message_in)
if t.iter.func.value.id == self._input_message_var:
self.dispatchMessageLoop(t)
else:
self.RaiseError(t, "Range based for loops only support calling members of message input variable")
else:
self.RaiseError(t, "Range based for loops only support message iteration or use of 'range'")
else:
self.RaiseError(t, "Range based for loops only support message iteration or use of 'range'")
def _AsyncFor(self, t):
self.RaiseError(t, "Async for not supported")
def _If(self, t):
"""
Fairly straightforward translation to if, else if, else format
"""
self.fill("if (")
self.dispatch(t.test)
self.write(")")
self.enter()
self.dispatch(t.body)
self.leave()
# collapse nested ifs into equivalent elifs.
while (t.orelse and len(t.orelse) == 1 and
isinstance(t.orelse[0], ast.If)):
t = t.orelse[0]
self.fill("else if (")
self.dispatch(t.test)
self.write(")")
self.enter()
self.dispatch(t.body)
self.leave()
# final else
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
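    # Illustrative translation (a sketch): elif chains collapse into else if:
    #   if a: ...
    #   elif b: ...
    #   else: ...
    #     ->   if (a) {...} else if (b) {...} else {...}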
def _While(self, t):
"""
Straightforward translation to c style while loop
"""
self.fill("while (")
self.dispatch(t.test)
self.write(")")
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.RaiseError(t, "While else not supported")
def _With(self, t):
self.RaiseError(t, "With not supported")
def _AsyncWith(self, t):
self.RaiseError(t, "Async with not supported")
# expr
def _Bytes(self, t):
self.RaiseError(t, "Byte strings and Bytes function not supported")
def _Str(self, tree):
# force writing in double quotes
self.write(f'"{tree.s}"')
def _JoinedStr(self, t):
self.RaiseError(t, "Joined strings not supported")
def _FormattedValue(self, t):
self.RaiseError(t, "Formatted strings not supported")
def _fstring_JoinedStr(self, t, write):
self.RaiseError(t, "F strings not supported")
def _fstring_Str(self, t, write):
self.RaiseError(t, "F strings not supported")
def _fstring_Constant(self, t, write):
self.RaiseError(t, "F strings not supported")
def _fstring_FormattedValue(self, t, write):
self.RaiseError(t, "F strings not supported")
def _Name(self, t):
"""
Everything ends up as a Name once it is an identifier
"""
self.write(t.id)
def _NameConstant(self, t):
# Required only for Python 3.7
        if t.value is None:
            self.write("0")
elif t.value:
self.write("true")
else:
self.write("false")
def _Repr(self, t):
self.RaiseError(t, "Repr not supported")
def _Constant(self, t):
"""
Restrict most types of constant except for numeric types and constant strings
Picks up some obvious conversions such as None and Bools
"""
value = t.value
if isinstance(value, tuple):
self.RaiseError(t, "Tuples not supported")
if isinstance(value, dict):
self.RaiseError(t, "Dictionaries not supported")
if isinstance(value, list):
self.RaiseError(t, "Lists not supported")
elif value is Ellipsis: # instead of `...` for Py2 compatibility
self.RaiseError(t, "Ellipsis not supported")
elif isinstance(value, str):
self.write(f'"{value}"')
elif isinstance(value, (bytes, bytearray)): # reject bytes strings e.g. b'123'
self.RaiseError(t, "Byte strings and Bytes function not supported")
elif isinstance(value, bool):
if value:
self.write("true")
else:
self.write("false")
        elif value is None:
            self.write("0")
else:
self.write(repr(value))
def _Num(self, t):
self.write(repr(t.n))
def _List(self, t):
self.RaiseError(t, "Lists not supported")
def _ListComp(self, t):
self.RaiseError(t, "List comprehension not supported")
def _GeneratorExp(self, t):
self.RaiseError(t, "Generator expressions not supported")
def _SetComp(self, t):
self.RaiseError(t, "Set comprehension not supported")
def _DictComp(self, t):
self.RaiseError(t, "Dictionary comprehension not supported")
def _comprehension(self, t):
self.RaiseError(t, "Comprehension not supported")
def _IfExp(self, t):
"""
Equivalent to a ternary operator
"""
self.dispatch(t.test)
self.write(" ? ")
self.dispatch(t.body)
self.write(" : ")
self.dispatch(t.orelse)
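    # Illustrative translation (a sketch):
    #   x = a if cond else b   ->   auto x = cond ? a : b;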
def _Set(self, t):
self.RaiseError(t, "Sets not supported")
def _Dict(self, t):
self.RaiseError(t, "Dictionaries not supported")
def _Tuple(self, t):
self.RaiseError(t, "Tuples not supported")
unop = {"Invert":"~", "Not": "!", "UAdd":"+", "USub":"-"}
def _UnaryOp(self, t):
"""
        Translate to C equivalent operators
"""
self.write("(")
self.write(self.unop[t.op.__class__.__name__])
self.dispatch(t.operand)
self.write(")")
binop = { "Add":"+", "Sub":"-", "Mult":"*", "MatMult":"@", "Div":"/", "Mod":"%",
"LShift":"<<", "RShift":">>", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
"FloorDiv":"//", "Pow": "**"}
def _BinOp(self, t):
"""
        Python style pow and floordiv have no C++ operator equivalents so translate them to function calls.
        No matrix mul support.
"""
op_name = t.op.__class__.__name__
# translate pow into function call (no float version)
if op_name == "Pow":
self.write("pow(")
self.dispatch(t.left)
self.write(", ")
self.dispatch(t.right)
self.write(")")
# translate floor div into function call (no float version)
elif op_name == "FloorDiv":
self.write("floor(")
self.dispatch(t.left)
self.write("/")
self.dispatch(t.right)
self.write(")")
elif op_name == "MatMult":
self.RaiseError(t, "Matrix multiplier operator not supported")
else:
self.write("(")
self.dispatch(t.left)
self.write(" " + self.binop[op_name] + " ")
self.dispatch(t.right)
self.write(")")
cmpops = {"Eq":"==", "NotEq":"!=", "Lt":"<", "LtE":"<=", "Gt":">", "GtE":">=",
"Is":"==", "IsNot":"!=", "In":"in", "NotIn":"not in"}
def _Compare(self, t):
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
# detect list ops
if o.__class__.__name__ == "In" or o.__class__.__name__ == "NotIn":
self.RaiseError(t, "In and NotIn operators not supported")
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
self.dispatch(e)
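    # Note: chained comparisons are emitted verbatim without grouping, so
    # 'a < b < c' becomes 'a < b < c', which C++ parses as '(a < b) < c' and
    # is therefore not equivalent to the Python semantics.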
boolops = {ast.And: '&&', ast.Or: '||'}
def _BoolOp(self, t):
"""
Translate to logical and/or operators in C
"""
self.write("(")
s = " %s " % self.boolops[t.op.__class__]
interleave(lambda: self.write(s), self.dispatch, t.values)
self.write(")")
def _Attribute(self,t):
"""
        A very limited set of attributes are supported so these are fully evaluated here. Other places where attribute type expressions may occur will also evaluate them fully rather than recursively calling this function.
Attributes supported are only;
* pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. This will be translated into a namespace member.
* math.constant - Any supported math constants are translated to C definition versions
"""
        # Only a limited set of globals supported
# pyflamegpu singleton
if isinstance(t.value, ast.Name):
if t.value.id == "pyflamegpu":
if t.attr in self.fgpu_attrs:
# proceed
self.write("flamegpu::")
self.write(t.attr)
else:
self.RaiseError(t, f"Attribute '{t.attr}' does not exist in pyflamegpu object")
# math functions (try them in raw function call format) or constants
elif t.value.id == "math":
if t.attr in self.mathconsts:
self.write(self.mathconsts[t.attr])
else:
self.RaiseError(t, f"Unsupported math constant '{t.attr}'")
# numpy types
elif t.value.id == "numpy" or t.value.id == "np":
# not sure how a numpy attribute would be used without function call or type hint but translate anyway
if t.attr in self.numpytypes:
self.write(self.numpytypes[t.attr])
else:
self.RaiseError(t, f"Unsupported numpy type {t.attr}")
else:
self.RaiseError(t, f"Global '{t.value.id}' identifiers not supported")
else:
self.RaiseError(t, "Unsupported attribute")
def _CallArguments(self, t):
comma = False
for e in t.args:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
if len(t.keywords):
self.RaiseWarning(t, "Keyword argument not supported. Ignored.")
if sys.version_info[:2] < (3, 5):
if t.starargs:
self.RaiseWarning(t, "Starargs not supported. Ignored.")
if t.kwargs:
self.RaiseWarning(t, "Kwargs not supported. Ignored.")
def _Call(self, t):
"""
        Some basic checks are undertaken on calls to ensure that the function being called is either a builtin or a defined device function.
        A special dispatcher is required for member function calls (see dispatchMemberFunction)
"""
# check calls but let attributes check in their own dispatcher
funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var] # message_input variable is a valid function name as certain message types have arguments on iterator
if isinstance(t.func, ast.Name):
if (t.func.id not in funcs):
self.RaiseWarning(t, "Function call is not a defined FLAME GPU device function or a supported python built in.")
# dispatch even if warning raised
self.dispatch(t.func)
elif isinstance(t.func, ast.Lambda):
self.dispatch(t.func) # not supported
else:
# special handler for dispatching member function calls
# This would otherwise be an attribute
self.dispatchMemberFunction(t.func, t)
self.write("(")
self._CallArguments(t)
self.write(")")
def _Subscript(self, t):
"""
        Arrays are not supported, but subscripting allows access to array-like variables, which is required for macro environment properties (e.g. a[0][1][2])
        An obvious limitation is that there is no slicing type syntax (e.g. a[:2])
"""
self.dispatch(t.value)
self.write("[")
self.dispatch(t.slice)
self.write("]")
def _Starred(self, t):
self.RaiseError(t, "Starred values not supported")
# slice
def _Ellipsis(self, t):
self.RaiseError(t, "Ellipsis values not supported")
def _Index(self, t):
self.RaiseError(t, "Index values not supported")
def _Slice(self, t):
self.RaiseError(t, "Slicing values not supported")
def _ExtSlice(self, t):
self.RaiseError(t, "ExtSlice values not supported")
# argument
def _arg(self, t):
"""
Arguments should be processed by a custom dispatcher and it should not be possible to get here
"""
self.RaiseError(t, "Arguments should already have been processed")
# others
def _arguments(self, t):
"""
Arguments should be processed by a custom dispatcher and it should not be possible to get here
"""
self.RaiseError(t, "Arguments should already have been processed")
def _keyword(self, t):
self.RaiseError(t, "Keywords are not supported")
def _Lambda(self, t):
self.RaiseError(t, "Lambda is not supported")
def _alias(self, t):
self.RaiseError(t, "Aliasing is not supported")
def _withitem(self, t):
self.RaiseError(t, "With not supported")
|
normal
|
{
"blob_id": "8443d208a6a6bef82240235eeadbf6f8eaf77bcb",
"index": 2995,
"step-1": "<mask token>\n\n\nclass CodeGenerator:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, tree, file=sys.stdout):\n \"\"\"CodeGenerator(tree, file=sys.stdout) -> None.\n Print the source for tree to file.\"\"\"\n self.f = file\n self.future_imports = []\n self._indent = 0\n self._locals = ['pyflamegpu']\n self._device_functions = []\n self._message_iterator_var = None\n self._input_message_var = 'message_in'\n self._output_message_var = 'message_out'\n self.dispatch(tree)\n print('', file=self.f)\n self.f.flush()\n <mask token>\n\n def fill(self, text=''):\n \"\"\"Indent a piece of text, according to the current indentation level\"\"\"\n self.f.write('\\n' + ' ' * self._indent + text)\n <mask token>\n\n def enter(self):\n \"\"\"Print '{', and increase the indentation.\"\"\"\n self.write('{')\n self._indent += 1\n\n def leave(self):\n \"\"\"Decrease the indentation level and Print '}'\"\"\"\n self._indent -= 1\n self.fill('}')\n <mask token>\n\n def RaiseWarning(self, tree, str):\n warnings.warn(f'Warning ({tree.lineno}, {tree.col_offset}): {str}')\n\n def RaiseError(self, tree, str):\n raise CodeGenException(\n f'Error ({tree.lineno}, {tree.col_offset}): {str}')\n <mask token>\n\n def dispatchFGPUFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU function. Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`\n Type hinting is required to translate a type into a FLAME GPU Message type implementation\n \"\"\"\n self._locals = ['pyflamegpu']\n if len(tree.args.args) != 2:\n self.RaiseError(tree,\n 'Expected two FLAME GPU function arguments (input message and output message)'\n )\n if not tree.args.args[0].annotation:\n self.RaiseError(tree.args.args[0],\n 'Message input requires a supported type annotation')\n if not isinstance(tree.args.args[0].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[0].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[0],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n input_message_attr = tree.args.args[0\n ].annotation.value.id + '.' + tree.args.args[0].annotation.attr\n if input_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation not a supported message type')\n self._input_message_var = tree.args.args[0].arg\n self.write(f'flamegpu::{tree.args.args[0].annotation.attr}')\n self.write(', ')\n if not tree.args.args[1].annotation:\n self.RaiseError(tree.args.args[1],\n 'Message output requires a supported type annotation')\n if not isinstance(tree.args.args[1].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[1].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n output_message_attr = tree.args.args[1\n ].annotation.value.id + '.' 
+ tree.args.args[1].annotation.attr\n if output_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation not a supported message type')\n self._output_message_var = tree.args.args[1].arg\n self.write(f'flamegpu::{tree.args.args[1].annotation.attr}')\n\n def dispatchType(self, tree):\n \"\"\"\n There is a limited set of types and formats of type description supported. Types can be either;\n 1) A python built in type of int or float, or\n 2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16\n This function translates and a catches unsupported types but does not translate a function call (i.e. cast)\n \"\"\"\n if isinstance(tree, ast.Name):\n if tree.id not in self.basic_arg_types:\n self.RaiseError(tree, 'Not a supported type')\n self.write(tree.id)\n elif isinstance(tree, ast.Attribute):\n if not isinstance(tree.value, ast.Name):\n self.RaiseError(tree, 'Not a supported type')\n if not (tree.value.id == 'numpy' or tree.value.id == 'np'):\n self.RaiseError(tree, 'Not a supported type')\n if tree.attr not in self.numpytypes:\n self.RaiseError(tree, 'Not a supported numpy type')\n self.write(self.numpytypes[tree.attr])\n else:\n self.RaiseError(tree, 'Not a supported type')\n <mask token>\n <mask token>\n\n def dispatchMessageLoop(self, tree):\n \"\"\"\n This is a special case of a range based for loop in which iterator item returns a const referecne to the message.\n Any user specified message value can be used.\n \"\"\"\n self.fill('for (const auto& ')\n self.dispatch(tree.target)\n self.write(' : ')\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(t,\n f\"Message input loop requires use of '{self._input_message_var}' as iterator.\"\n )\n self.write(f'FLAMEGPU->{self._input_message_var}')\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n else:\n self.RaiseError(tree,\n f'Message input loop iterator in unsupported format')\n self.write(')')\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None\n\n def dispatchMemberFunction(self, t, t_parent):\n \"\"\"\n A very limited set of function calls to members are supported so these are fully evaluated here.\n t_parent is the Call ast object required if the argument need to be modified (i.e. in the case of macro environment properties)\n Function calls permitted are;\n * pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). This will be translated into a typed Cpp call.\n * message_input.function - a call to the message input variable (the name of which is specified in the function definition)\n * msg.function - a call to the message input iterator objection variable (the name of which is specified in the message function loop)\n * message_output.function - a call to the message output variable (the name of which is specified in the function definition)\n * pyflamegpu.environment.function - the only nested attribute type. This will be translated into a typed Cpp call.\n * math.function - Any function calls from python `math` are translated to calls raw function calls. E.g. 
`math.sin()` becomes `sin()`\n * numpy.type - Any numpy types are translated to static casts\n \"\"\"\n if not hasattr(t, 'value'):\n self.RaiseError(t, f'Function call is in an unsupported format.')\n if isinstance(t.value, ast.Attribute):\n t_parent.call_type = None\n if not isinstance(t.value.value, ast.Name):\n self.RaiseError(t, 'Unknown or unsupported nested attribute')\n if (t.value.value.id == 'pyflamegpu' and t.value.attr ==\n 'environment'):\n self.write('FLAMEGPU->environment.')\n if t.attr in self.fgpu_env_funcs:\n self.write(t.attr)\n elif t.attr.startswith('getProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getProperty'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment property function.\"\n )\n self.write(py_func)\n t_parent.call_type = 'Environment'\n elif t.attr.startswith('getMacroProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getMacroProperty'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.\"\n )\n self.dispatchMacroEnvFunction(t, t_parent)\n t_parent.call_type = 'MacroEnvironment'\n else:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.environment object\"\n )\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'random':\n self.write('FLAMEGPU->random.')\n if t.attr in self.fgpu_rand_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'uniform', 'normal', 'logNormal'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.random object\"\n )\n self.write(py_func)\n t_parent.call_type = 'Random'\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'agent_out':\n self.write('FLAMEGPU->agent_out.')\n if t.attr in self.fgpu_agent_out_msg_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.agent_out object\"\n )\n self.write(py_func)\n t_parent.call_type = 'AgentOut'\n else:\n self.RaiseError(t,\n f'Unknown or unsupported nested attribute in {t.value.value.id}'\n )\n elif isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n self.write('FLAMEGPU->')\n if t.attr in self.fgpu_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable', 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu object\"\n )\n self.write(py_func)\n elif t.value.id == self._input_message_var:\n if t.attr in self.fgpu_input_msg_funcs:\n self.write(f'FLAMEGPU->{self._input_message_var}.')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Message input variable '{self._input_message_var}' does not have a supported function '{t.attr}'\"\n )\n elif self._message_iterator_var and t.value.id == self._message_iterator_var:\n self.write(f'{self._message_iterator_var}.')\n if t.attr in self.fgpu_input_msg_iter_var_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object\"\n )\n self.write(py_func)\n elif t.value.id == self._output_message_var:\n self.write('FLAMEGPU->message_out.')\n if t.attr in self.fgpu_output_msg_funcs:\n 
self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object\"\n )\n self.write(py_func)\n elif t.value.id == 'math':\n self.write(t.attr)\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(f'static_cast<{self.numpytypes[t.attr]}>')\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n elif t.value.id in self._locals:\n self.write(f'{t.value.id}.{t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifier not supported\")\n elif isinstance(t.value, ast.Call):\n self.dispatchMemberFunction(t.value.func, t.value)\n if t.value.call_type != 'MacroEnvironment':\n self.RaiseError(t, f'Function call {t.attr} is not supported')\n if not t.attr in self.fgpu_env_macro_funcs:\n self.RaiseError(t,\n f'Function {t.attr} is not a valid macro environment function'\n )\n self.write('(')\n self._CallArguments(t.value)\n self.write(')')\n self.write(f'.{t.attr}')\n else:\n self.RaiseError(t, 'Unsupported function call syntax')\n\n def _Module(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Interactive(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Expression(self, tree):\n self.dispatch(tree.body)\n <mask token>\n <mask token>\n <mask token>\n\n def _ImportFrom(self, t):\n self.RaiseError(t, 'Importing of modules not supported')\n\n def _Assign(self, t):\n \"\"\"\n Assignment will use the auto type to define a variable at first use else will perform standard assignment.\n Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.\n Multiple assignment is supported by cpp but not in the translator neither is assignment to complex expressions which are valid python syntax.\n \"\"\"\n if len(t.targets) > 1:\n self.RaiseError(t, 'Assignment to multiple targets not supported')\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t,\n 'Assignment to complex expressions not supported')\n self.fill()\n if t.targets[0].id not in self._locals:\n self.write('auto ')\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(' = ')\n self.dispatch(t.value)\n self.write(';')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _Continue(self, t):\n self.fill('continue;')\n <mask token>\n\n def _Assert(self, t):\n \"\"\"\n cassert does exist but probably not required in FGPU functions and unclear if supported by jitfy\n \"\"\"\n self.RaiseError(t, 'Assert not supported')\n <mask token>\n\n def _Print(self, t):\n \"\"\"\n This is old school python printing so no need to support\n \"\"\"\n self.RaiseError(t, 'Print not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _Yield(self, t):\n self.RaiseError(t, 'Yield not supported')\n\n def _YieldFrom(self, t):\n self.RaiseError(t, 'Yield from not supported')\n <mask token>\n <mask token>\n\n def _TryExcept(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _TryFinally(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n <mask token>\n\n def _ClassDef(self, t):\n self.RaiseError(t, 'Class definitions not supported')\n <mask token>\n\n def _AsyncFunctionDef(self, t):\n self.RaiseError(t, 'Async functions not supported')\n\n def _For(self, t):\n \"\"\"\n Two type for for loop are supported. 
Either;\n 1) Message for loop in which case the format requires a iterator using the named pyflamegpu function argument of 'message_in'\n 2) A range based for loop with 1 to 3 arguments which is converted into a c style loop\n \"\"\"\n if isinstance(t.iter, ast.Name):\n if t.iter.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration using 'message_in' iterator\"\n )\n elif t.orelse:\n self.RaiseError(t, 'For else not supported')\n elif isinstance(t.iter, ast.Call):\n if isinstance(t.iter.func, ast.Name):\n if t.iter.func.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n elif t.iter.func.id == 'range':\n if len(t.iter.args) == 1:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=0;')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 2:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 3:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('+=')\n self.dispatch(t.iter.args[2])\n self.write(')')\n else:\n self.RaiseError(t,\n \"Range based for loops requires use of 'range' function with arguments and not keywords\"\n )\n self.enter()\n self.dispatch(t.body)\n self.leave()\n else:\n self.RaiseError(t,\n \"Range based for loops only support calls to the 'range' function\"\n )\n elif isinstance(t.iter.func, ast.Attribute):\n if t.iter.func.value.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n 'Range based for loops only support calling members of message input variable'\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n <mask token>\n <mask token>\n\n def _While(self, t):\n \"\"\"\n Straightforward translation to c style while loop\n \"\"\"\n self.fill('while (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, 'While else not supported')\n <mask token>\n <mask token>\n\n def _Bytes(self, t):\n self.RaiseError(t, 'Byte strings and Bytes function not supported')\n <mask token>\n\n def _JoinedStr(self, t):\n self.RaiseError(t, 'Joined strings not supported')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _NameConstant(self, t):\n if t.value == None:\n self.write(0)\n elif t.value:\n self.write('true')\n else:\n self.write('false')\n\n def _Repr(self, t):\n self.RaiseError(t, 'Repr not supported')\n <mask token>\n <mask token>\n\n def _List(self, t):\n self.RaiseError(t, 'Lists not supported')\n <mask token>\n\n def _GeneratorExp(self, t):\n self.RaiseError(t, 'Generator expressions not supported')\n\n def _SetComp(self, t):\n self.RaiseError(t, 'Set comprehension not supported')\n\n def _DictComp(self, t):\n self.RaiseError(t, 'Dictionary comprehension not supported')\n\n def 
_comprehension(self, t):\n self.RaiseError(t, 'Comprehension not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _Tuple(self, t):\n self.RaiseError(t, 'Tuples not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _BinOp(self, t):\n \"\"\"\n Python style pow and floordiv are not supported so translate to a function call.\n No matrix mul support.\n \"\"\"\n op_name = t.op.__class__.__name__\n if op_name == 'Pow':\n self.write('pow(')\n self.dispatch(t.left)\n self.write(', ')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'FloorDiv':\n self.write('floor(')\n self.dispatch(t.left)\n self.write('/')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'MatMult':\n self.RaiseError(t, 'Matrix multiplier operator not supported')\n else:\n self.write('(')\n self.dispatch(t.left)\n self.write(' ' + self.binop[op_name] + ' ')\n self.dispatch(t.right)\n self.write(')')\n <mask token>\n\n def _Compare(self, t):\n self.dispatch(t.left)\n for o, e in zip(t.ops, t.comparators):\n if o.__class__.__name__ == 'In' or o.__class__.__name__ == 'NotIn':\n self.RaiseError(t, 'In and NotIn operators not supported')\n self.write(' ' + self.cmpops[o.__class__.__name__] + ' ')\n self.dispatch(e)\n <mask token>\n <mask token>\n\n def _Attribute(self, t):\n \"\"\"\n A very limited set of attributes are supported so these are fully evaluated here. Other places where attribute type expressions may occur will also evaluate them fully rather than recursively call this function.\n Attributes supported are only;\n * pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. This will be translated into a namespace member.\n * math.constant - Any supported math constants are translated to C definition versions\n \"\"\"\n func_dict = None\n if isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n if t.attr in self.fgpu_attrs:\n self.write('flamegpu::')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Attribute '{t.attr}' does not exist in pyflamegpu object\"\n )\n elif t.value.id == 'math':\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, 'Unsupported attribute')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _Ellipsis(self, t):\n self.RaiseError(t, 'Ellipsis values not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _arg(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, 'Arguments should already have been processed')\n <mask token>\n <mask token>\n <mask token>\n\n def _alias(self, t):\n self.RaiseError(t, 'Aliasing is not supported')\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CodeGenerator:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, tree, file=sys.stdout):\n \"\"\"CodeGenerator(tree, file=sys.stdout) -> None.\n Print the source for tree to file.\"\"\"\n self.f = file\n self.future_imports = []\n self._indent = 0\n self._locals = ['pyflamegpu']\n self._device_functions = []\n self._message_iterator_var = None\n self._input_message_var = 'message_in'\n self._output_message_var = 'message_out'\n self.dispatch(tree)\n print('', file=self.f)\n self.f.flush()\n <mask token>\n\n def fill(self, text=''):\n \"\"\"Indent a piece of text, according to the current indentation level\"\"\"\n self.f.write('\\n' + ' ' * self._indent + text)\n <mask token>\n\n def enter(self):\n \"\"\"Print '{', and increase the indentation.\"\"\"\n self.write('{')\n self._indent += 1\n\n def leave(self):\n \"\"\"Decrease the indentation level and Print '}'\"\"\"\n self._indent -= 1\n self.fill('}')\n <mask token>\n\n def RaiseWarning(self, tree, str):\n warnings.warn(f'Warning ({tree.lineno}, {tree.col_offset}): {str}')\n\n def RaiseError(self, tree, str):\n raise CodeGenException(\n f'Error ({tree.lineno}, {tree.col_offset}): {str}')\n <mask token>\n\n def dispatchFGPUFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU function. Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`\n Type hinting is required to translate a type into a FLAME GPU Message type implementation\n \"\"\"\n self._locals = ['pyflamegpu']\n if len(tree.args.args) != 2:\n self.RaiseError(tree,\n 'Expected two FLAME GPU function arguments (input message and output message)'\n )\n if not tree.args.args[0].annotation:\n self.RaiseError(tree.args.args[0],\n 'Message input requires a supported type annotation')\n if not isinstance(tree.args.args[0].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[0].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[0],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n input_message_attr = tree.args.args[0\n ].annotation.value.id + '.' + tree.args.args[0].annotation.attr\n if input_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation not a supported message type')\n self._input_message_var = tree.args.args[0].arg\n self.write(f'flamegpu::{tree.args.args[0].annotation.attr}')\n self.write(', ')\n if not tree.args.args[1].annotation:\n self.RaiseError(tree.args.args[1],\n 'Message output requires a supported type annotation')\n if not isinstance(tree.args.args[1].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[1].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n output_message_attr = tree.args.args[1\n ].annotation.value.id + '.' 
+ tree.args.args[1].annotation.attr\n if output_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation not a supported message type')\n self._output_message_var = tree.args.args[1].arg\n self.write(f'flamegpu::{tree.args.args[1].annotation.attr}')\n\n def dispatchType(self, tree):\n \"\"\"\n There is a limited set of types and formats of type description supported. Types can be either;\n 1) A python built in type of int or float, or\n 2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16\n This function translates and a catches unsupported types but does not translate a function call (i.e. cast)\n \"\"\"\n if isinstance(tree, ast.Name):\n if tree.id not in self.basic_arg_types:\n self.RaiseError(tree, 'Not a supported type')\n self.write(tree.id)\n elif isinstance(tree, ast.Attribute):\n if not isinstance(tree.value, ast.Name):\n self.RaiseError(tree, 'Not a supported type')\n if not (tree.value.id == 'numpy' or tree.value.id == 'np'):\n self.RaiseError(tree, 'Not a supported type')\n if tree.attr not in self.numpytypes:\n self.RaiseError(tree, 'Not a supported numpy type')\n self.write(self.numpytypes[tree.attr])\n else:\n self.RaiseError(tree, 'Not a supported type')\n <mask token>\n\n def dispatchMessageIteratorCall(self, tree):\n \"\"\"\n Message iterator call maybe a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap())\n Using this function avoid using the global call one which may accept member function calls to things that are not iterators.\n \"\"\"\n if isinstance(tree.func, ast.Name):\n self.write(f'FLAMEGPU->{tree.func.id}')\n if isinstance(tree.func, ast.Attribute):\n if isinstance(tree.func.value, ast.Name):\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree,\n f\"Message input loop iterator '{tree.func.attr}' is not supported.\"\n )\n self.write(f'FLAMEGPU->{tree.func.value.id}.{tree.func.attr}')\n else:\n self.RaiseError(tree,\n 'Message input loop iterator format incorrect.')\n self.write('(')\n self._CallArguments(tree)\n self.write(')')\n\n def dispatchMessageLoop(self, tree):\n \"\"\"\n This is a special case of a range based for loop in which iterator item returns a const referecne to the message.\n Any user specified message value can be used.\n \"\"\"\n self.fill('for (const auto& ')\n self.dispatch(tree.target)\n self.write(' : ')\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(t,\n f\"Message input loop requires use of '{self._input_message_var}' as iterator.\"\n )\n self.write(f'FLAMEGPU->{self._input_message_var}')\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n else:\n self.RaiseError(tree,\n f'Message input loop iterator in unsupported format')\n self.write(')')\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None\n\n def dispatchMemberFunction(self, t, t_parent):\n \"\"\"\n A very limited set of function calls to members are supported so these are fully evaluated here.\n t_parent is the Call ast object required if the argument need to be modified (i.e. in the case of macro environment properties)\n Function calls permitted are;\n * pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). 
This will be translated into a typed Cpp call.\n * message_input.function - a call to the message input variable (the name of which is specified in the function definition)\n * msg.function - a call to the message input iterator objection variable (the name of which is specified in the message function loop)\n * message_output.function - a call to the message output variable (the name of which is specified in the function definition)\n * pyflamegpu.environment.function - the only nested attribute type. This will be translated into a typed Cpp call.\n * math.function - Any function calls from python `math` are translated to calls raw function calls. E.g. `math.sin()` becomes `sin()`\n * numpy.type - Any numpy types are translated to static casts\n \"\"\"\n if not hasattr(t, 'value'):\n self.RaiseError(t, f'Function call is in an unsupported format.')\n if isinstance(t.value, ast.Attribute):\n t_parent.call_type = None\n if not isinstance(t.value.value, ast.Name):\n self.RaiseError(t, 'Unknown or unsupported nested attribute')\n if (t.value.value.id == 'pyflamegpu' and t.value.attr ==\n 'environment'):\n self.write('FLAMEGPU->environment.')\n if t.attr in self.fgpu_env_funcs:\n self.write(t.attr)\n elif t.attr.startswith('getProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getProperty'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment property function.\"\n )\n self.write(py_func)\n t_parent.call_type = 'Environment'\n elif t.attr.startswith('getMacroProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getMacroProperty'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.\"\n )\n self.dispatchMacroEnvFunction(t, t_parent)\n t_parent.call_type = 'MacroEnvironment'\n else:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.environment object\"\n )\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'random':\n self.write('FLAMEGPU->random.')\n if t.attr in self.fgpu_rand_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'uniform', 'normal', 'logNormal'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.random object\"\n )\n self.write(py_func)\n t_parent.call_type = 'Random'\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'agent_out':\n self.write('FLAMEGPU->agent_out.')\n if t.attr in self.fgpu_agent_out_msg_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.agent_out object\"\n )\n self.write(py_func)\n t_parent.call_type = 'AgentOut'\n else:\n self.RaiseError(t,\n f'Unknown or unsupported nested attribute in {t.value.value.id}'\n )\n elif isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n self.write('FLAMEGPU->')\n if t.attr in self.fgpu_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable', 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu object\"\n )\n self.write(py_func)\n elif t.value.id == self._input_message_var:\n if t.attr in self.fgpu_input_msg_funcs:\n self.write(f'FLAMEGPU->{self._input_message_var}.')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Message input variable 
'{self._input_message_var}' does not have a supported function '{t.attr}'\"\n )\n elif self._message_iterator_var and t.value.id == self._message_iterator_var:\n self.write(f'{self._message_iterator_var}.')\n if t.attr in self.fgpu_input_msg_iter_var_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object\"\n )\n self.write(py_func)\n elif t.value.id == self._output_message_var:\n self.write('FLAMEGPU->message_out.')\n if t.attr in self.fgpu_output_msg_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object\"\n )\n self.write(py_func)\n elif t.value.id == 'math':\n self.write(t.attr)\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(f'static_cast<{self.numpytypes[t.attr]}>')\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n elif t.value.id in self._locals:\n self.write(f'{t.value.id}.{t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifier not supported\")\n elif isinstance(t.value, ast.Call):\n self.dispatchMemberFunction(t.value.func, t.value)\n if t.value.call_type != 'MacroEnvironment':\n self.RaiseError(t, f'Function call {t.attr} is not supported')\n if not t.attr in self.fgpu_env_macro_funcs:\n self.RaiseError(t,\n f'Function {t.attr} is not a valid macro environment function'\n )\n self.write('(')\n self._CallArguments(t.value)\n self.write(')')\n self.write(f'.{t.attr}')\n else:\n self.RaiseError(t, 'Unsupported function call syntax')\n\n def _Module(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Interactive(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Expression(self, tree):\n self.dispatch(tree.body)\n <mask token>\n <mask token>\n <mask token>\n\n def _ImportFrom(self, t):\n self.RaiseError(t, 'Importing of modules not supported')\n\n def _Assign(self, t):\n \"\"\"\n Assignment will use the auto type to define a variable at first use else will perform standard assignment.\n Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.\n Multiple assignment is supported by cpp but not in the translator neither is assignment to complex expressions which are valid python syntax.\n \"\"\"\n if len(t.targets) > 1:\n self.RaiseError(t, 'Assignment to multiple targets not supported')\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t,\n 'Assignment to complex expressions not supported')\n self.fill()\n if t.targets[0].id not in self._locals:\n self.write('auto ')\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(' = ')\n self.dispatch(t.value)\n self.write(';')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _Continue(self, t):\n self.fill('continue;')\n <mask token>\n\n def _Assert(self, t):\n \"\"\"\n cassert does exist but probably not required in FGPU functions and unclear if supported by jitfy\n \"\"\"\n self.RaiseError(t, 'Assert not supported')\n <mask token>\n\n def _Print(self, t):\n \"\"\"\n This is old school python printing so no need to support\n \"\"\"\n self.RaiseError(t, 'Print not supported')\n <mask token>\n\n def _Nonlocal(self, t):\n 
self.RaiseError(t, \"Use of 'nonlocal' not supported\")\n <mask token>\n\n def _Yield(self, t):\n self.RaiseError(t, 'Yield not supported')\n\n def _YieldFrom(self, t):\n self.RaiseError(t, 'Yield from not supported')\n <mask token>\n <mask token>\n\n def _TryExcept(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _TryFinally(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n <mask token>\n\n def _ClassDef(self, t):\n self.RaiseError(t, 'Class definitions not supported')\n <mask token>\n\n def _AsyncFunctionDef(self, t):\n self.RaiseError(t, 'Async functions not supported')\n\n def _For(self, t):\n \"\"\"\n Two type for for loop are supported. Either;\n 1) Message for loop in which case the format requires a iterator using the named pyflamegpu function argument of 'message_in'\n 2) A range based for loop with 1 to 3 arguments which is converted into a c style loop\n \"\"\"\n if isinstance(t.iter, ast.Name):\n if t.iter.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration using 'message_in' iterator\"\n )\n elif t.orelse:\n self.RaiseError(t, 'For else not supported')\n elif isinstance(t.iter, ast.Call):\n if isinstance(t.iter.func, ast.Name):\n if t.iter.func.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n elif t.iter.func.id == 'range':\n if len(t.iter.args) == 1:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=0;')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 2:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 3:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('+=')\n self.dispatch(t.iter.args[2])\n self.write(')')\n else:\n self.RaiseError(t,\n \"Range based for loops requires use of 'range' function with arguments and not keywords\"\n )\n self.enter()\n self.dispatch(t.body)\n self.leave()\n else:\n self.RaiseError(t,\n \"Range based for loops only support calls to the 'range' function\"\n )\n elif isinstance(t.iter.func, ast.Attribute):\n if t.iter.func.value.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n 'Range based for loops only support calling members of message input variable'\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n <mask token>\n <mask token>\n\n def _While(self, t):\n \"\"\"\n Straightforward translation to c style while loop\n \"\"\"\n self.fill('while (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, 'While else not supported')\n <mask token>\n <mask token>\n\n def _Bytes(self, t):\n self.RaiseError(t, 'Byte strings and Bytes function not supported')\n <mask token>\n\n def _JoinedStr(self, t):\n self.RaiseError(t, 'Joined strings not supported')\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _NameConstant(self, t):\n if t.value == None:\n self.write(0)\n elif t.value:\n self.write('true')\n else:\n self.write('false')\n\n def _Repr(self, t):\n self.RaiseError(t, 'Repr not supported')\n <mask token>\n <mask token>\n\n def _List(self, t):\n self.RaiseError(t, 'Lists not supported')\n <mask token>\n\n def _GeneratorExp(self, t):\n self.RaiseError(t, 'Generator expressions not supported')\n\n def _SetComp(self, t):\n self.RaiseError(t, 'Set comprehension not supported')\n\n def _DictComp(self, t):\n self.RaiseError(t, 'Dictionary comprehension not supported')\n\n def _comprehension(self, t):\n self.RaiseError(t, 'Comprehension not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _Tuple(self, t):\n self.RaiseError(t, 'Tuples not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _BinOp(self, t):\n \"\"\"\n Python style pow and floordiv are not supported so translate to a function call.\n No matrix mul support.\n \"\"\"\n op_name = t.op.__class__.__name__\n if op_name == 'Pow':\n self.write('pow(')\n self.dispatch(t.left)\n self.write(', ')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'FloorDiv':\n self.write('floor(')\n self.dispatch(t.left)\n self.write('/')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'MatMult':\n self.RaiseError(t, 'Matrix multiplier operator not supported')\n else:\n self.write('(')\n self.dispatch(t.left)\n self.write(' ' + self.binop[op_name] + ' ')\n self.dispatch(t.right)\n self.write(')')\n <mask token>\n\n def _Compare(self, t):\n self.dispatch(t.left)\n for o, e in zip(t.ops, t.comparators):\n if o.__class__.__name__ == 'In' or o.__class__.__name__ == 'NotIn':\n self.RaiseError(t, 'In and NotIn operators not supported')\n self.write(' ' + self.cmpops[o.__class__.__name__] + ' ')\n self.dispatch(e)\n <mask token>\n <mask token>\n\n def _Attribute(self, t):\n \"\"\"\n A very limited set of attributes are supported so these are fully evaluated here. Other places where attribute type expressions may occur will also evaluate them fully rather than recursively call this function.\n Attributes supported are only;\n * pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. 
This will be translated into a namespace member.\n * math.constant - Any supported math constants are translated to C definition versions\n \"\"\"\n func_dict = None\n if isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n if t.attr in self.fgpu_attrs:\n self.write('flamegpu::')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Attribute '{t.attr}' does not exist in pyflamegpu object\"\n )\n elif t.value.id == 'math':\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, 'Unsupported attribute')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _Ellipsis(self, t):\n self.RaiseError(t, 'Ellipsis values not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _arg(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, 'Arguments should already have been processed')\n <mask token>\n <mask token>\n <mask token>\n\n def _alias(self, t):\n self.RaiseError(t, 'Aliasing is not supported')\n <mask token>\n",
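Each record in this dump masks progressively less of the same class: a CodeGenerator that walks a Python AST and prints the equivalent FLAME GPU 2 C++ device code. A minimal driver sketch follows; it relies only on the constructor signature visible in the records (CodeGenerator(tree, file=sys.stdout)) plus the standard library. The example agent function, its message-type annotations, and the availability of the masked handlers (e.g. _Call) are illustrative assumptions, not part of the dataset.

import ast
import io

# Hypothetical agent-function source; the decorator and the two annotated
# message arguments follow the patterns checked by _FunctionDef and
# dispatchFGPUFunctionArgs in the records. Whether 'pyflamegpu.MessageBruteForce'
# is in the (masked) fgpu_message_types set is assumed here.
src = '''
@pyflamegpu.agent_function
def output_location(message_in: pyflamegpu.MessageBruteForce, message_out: pyflamegpu.MessageBruteForce):
    x = pyflamegpu.getVariableFloat("x")
    message_out.setVariableFloat("x", x)
'''

tree = ast.parse(src)
buf = io.StringIO()
CodeGenerator(tree, file=buf)  # __init__ dispatches the whole module and flushes
print(buf.getvalue())          # expected: a FLAMEGPU_AGENT_FUNCTION(output_location, ...) body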
"step-3": "<mask token>\n\n\nclass CodeGenerator:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, tree, file=sys.stdout):\n \"\"\"CodeGenerator(tree, file=sys.stdout) -> None.\n Print the source for tree to file.\"\"\"\n self.f = file\n self.future_imports = []\n self._indent = 0\n self._locals = ['pyflamegpu']\n self._device_functions = []\n self._message_iterator_var = None\n self._input_message_var = 'message_in'\n self._output_message_var = 'message_out'\n self.dispatch(tree)\n print('', file=self.f)\n self.f.flush()\n <mask token>\n\n def fill(self, text=''):\n \"\"\"Indent a piece of text, according to the current indentation level\"\"\"\n self.f.write('\\n' + ' ' * self._indent + text)\n\n def write(self, text):\n \"\"\"Append a piece of text to the current line.\"\"\"\n self.f.write(str(text))\n\n def enter(self):\n \"\"\"Print '{', and increase the indentation.\"\"\"\n self.write('{')\n self._indent += 1\n\n def leave(self):\n \"\"\"Decrease the indentation level and Print '}'\"\"\"\n self._indent -= 1\n self.fill('}')\n <mask token>\n\n def RaiseWarning(self, tree, str):\n warnings.warn(f'Warning ({tree.lineno}, {tree.col_offset}): {str}')\n\n def RaiseError(self, tree, str):\n raise CodeGenException(\n f'Error ({tree.lineno}, {tree.col_offset}): {str}')\n\n def dispatchMacroEnvFunction(self, tree, tree_parent):\n \"\"\"\n Function will handle a getMacroEnvironment function (assuming it is correctly formatted (by checking with _deviceVariableFunctionName first))\n \"\"\"\n cpp_func_name = 'getMacroProperty'\n py_func = tree.attr\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_type]\n cpp_func_name += f'<{t}'\n if not tree_parent.args:\n self.RaiseError(tree,\n f\" Macro environment function '{py_func}' is expected to have some arguments.\"\n )\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n for i in bounds:\n if isinstance(i, ast.Num):\n if not isinstance(i.n, int):\n self.RaiseError(tree,\n f\" Macro environment function argument '{i}' should be an integer value.\"\n )\n cpp_func_name += f', {i.n}'\n else:\n if not isinstance(i, ast.Constant):\n self.RaiseError(tree,\n f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\"\n )\n if not isinstance(i.value, int):\n self.RaiseError(tree,\n f\" Macro environment function argument '{i}' should be an integer value.\"\n )\n cpp_func_name += f', {i.value}'\n del tree_parent.args[1:]\n cpp_func_name += '>'\n self.write(cpp_func_name)\n\n def dispatchFGPUFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU function. 
Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`\n Type hinting is required to translate a type into a FLAME GPU Message type implementation\n \"\"\"\n self._locals = ['pyflamegpu']\n if len(tree.args.args) != 2:\n self.RaiseError(tree,\n 'Expected two FLAME GPU function arguments (input message and output message)'\n )\n if not tree.args.args[0].annotation:\n self.RaiseError(tree.args.args[0],\n 'Message input requires a supported type annotation')\n if not isinstance(tree.args.args[0].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[0].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[0],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n input_message_attr = tree.args.args[0\n ].annotation.value.id + '.' + tree.args.args[0].annotation.attr\n if input_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation not a supported message type')\n self._input_message_var = tree.args.args[0].arg\n self.write(f'flamegpu::{tree.args.args[0].annotation.attr}')\n self.write(', ')\n if not tree.args.args[1].annotation:\n self.RaiseError(tree.args.args[1],\n 'Message output requires a supported type annotation')\n if not isinstance(tree.args.args[1].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[1].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n output_message_attr = tree.args.args[1\n ].annotation.value.id + '.' + tree.args.args[1].annotation.attr\n if output_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation not a supported message type')\n self._output_message_var = tree.args.args[1].arg\n self.write(f'flamegpu::{tree.args.args[1].annotation.attr}')\n\n def dispatchType(self, tree):\n \"\"\"\n There is a limited set of types and formats of type description supported. Types can be either;\n 1) A python built in type of int or float, or\n 2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16\n This function translates and a catches unsupported types but does not translate a function call (i.e. cast)\n \"\"\"\n if isinstance(tree, ast.Name):\n if tree.id not in self.basic_arg_types:\n self.RaiseError(tree, 'Not a supported type')\n self.write(tree.id)\n elif isinstance(tree, ast.Attribute):\n if not isinstance(tree.value, ast.Name):\n self.RaiseError(tree, 'Not a supported type')\n if not (tree.value.id == 'numpy' or tree.value.id == 'np'):\n self.RaiseError(tree, 'Not a supported type')\n if tree.attr not in self.numpytypes:\n self.RaiseError(tree, 'Not a supported numpy type')\n self.write(self.numpytypes[tree.attr])\n else:\n self.RaiseError(tree, 'Not a supported type')\n\n def dispatchFGPUDeviceFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU device function. 
Arguments must use type hinting to be translated to cpp.\n \"\"\"\n self._locals = ['pyflamegpu']\n first = True\n annotation = None\n for arg in tree.args.args:\n if not arg.annotation:\n self.RaiseError(arg,\n 'Device function argument requires type annotation')\n if not first:\n self.write(', ')\n self.dispatchType(arg.annotation)\n self.write(f' {arg.arg}')\n self._locals.append(arg.arg)\n first = False\n\n def dispatchMessageIteratorCall(self, tree):\n \"\"\"\n Message iterator call may be a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap())\n Using this function avoids the global call dispatcher, which may accept member function calls to things that are not iterators.\n \"\"\"\n if isinstance(tree.func, ast.Name):\n self.write(f'FLAMEGPU->{tree.func.id}')\n if isinstance(tree.func, ast.Attribute):\n if isinstance(tree.func.value, ast.Name):\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree,\n f\"Message input loop iterator '{tree.func.attr}' is not supported.\"\n )\n self.write(f'FLAMEGPU->{tree.func.value.id}.{tree.func.attr}')\n else:\n self.RaiseError(tree,\n 'Message input loop iterator format incorrect.')\n self.write('(')\n self._CallArguments(tree)\n self.write(')')\n\n def dispatchMessageLoop(self, tree):\n \"\"\"\n This is a special case of a range based for loop in which iterator item returns a const reference to the message.\n Any user specified message value can be used.\n \"\"\"\n self.fill('for (const auto& ')\n self.dispatch(tree.target)\n self.write(' : ')\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(tree,\n f\"Message input loop requires use of '{self._input_message_var}' as iterator.\"\n )\n self.write(f'FLAMEGPU->{self._input_message_var}')\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n else:\n self.RaiseError(tree,\n f'Message input loop iterator in unsupported format')\n self.write(')')\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None\n\n def dispatchMemberFunction(self, t, t_parent):\n \"\"\"\n A very limited set of function calls to members are supported so these are fully evaluated here.\n t_parent is the Call ast object required if the arguments need to be modified (i.e. in the case of macro environment properties)\n Function calls permitted are;\n * pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). This will be translated into a typed Cpp call.\n * message_input.function - a call to the message input variable (the name of which is specified in the function definition)\n * msg.function - a call to the message input iterator object variable (the name of which is specified in the message function loop)\n * message_output.function - a call to the message output variable (the name of which is specified in the function definition)\n * pyflamegpu.environment.function - the only nested attribute type. This will be translated into a typed Cpp call.\n * math.function - Any function calls from python `math` are translated to raw function calls. E.g. 
`math.sin()` becomes `sin()`\n * numpy.type - Any numpy types are translated to static casts\n \"\"\"\n if not hasattr(t, 'value'):\n self.RaiseError(t, f'Function call is in an unsupported format.')\n if isinstance(t.value, ast.Attribute):\n t_parent.call_type = None\n if not isinstance(t.value.value, ast.Name):\n self.RaiseError(t, 'Unknown or unsupported nested attribute')\n if (t.value.value.id == 'pyflamegpu' and t.value.attr ==\n 'environment'):\n self.write('FLAMEGPU->environment.')\n if t.attr in self.fgpu_env_funcs:\n self.write(t.attr)\n elif t.attr.startswith('getProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getProperty'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment property function.\"\n )\n self.write(py_func)\n t_parent.call_type = 'Environment'\n elif t.attr.startswith('getMacroProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getMacroProperty'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.\"\n )\n self.dispatchMacroEnvFunction(t, t_parent)\n t_parent.call_type = 'MacroEnvironment'\n else:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.environment object\"\n )\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'random':\n self.write('FLAMEGPU->random.')\n if t.attr in self.fgpu_rand_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'uniform', 'normal', 'logNormal'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.random object\"\n )\n self.write(py_func)\n t_parent.call_type = 'Random'\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'agent_out':\n self.write('FLAMEGPU->agent_out.')\n if t.attr in self.fgpu_agent_out_msg_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.agent_out object\"\n )\n self.write(py_func)\n t_parent.call_type = 'AgentOut'\n else:\n self.RaiseError(t,\n f'Unknown or unsupported nested attribute in {t.value.value.id}'\n )\n elif isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n self.write('FLAMEGPU->')\n if t.attr in self.fgpu_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable', 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu object\"\n )\n self.write(py_func)\n elif t.value.id == self._input_message_var:\n if t.attr in self.fgpu_input_msg_funcs:\n self.write(f'FLAMEGPU->{self._input_message_var}.')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Message input variable '{self._input_message_var}' does not have a supported function '{t.attr}'\"\n )\n elif self._message_iterator_var and t.value.id == self._message_iterator_var:\n self.write(f'{self._message_iterator_var}.')\n if t.attr in self.fgpu_input_msg_iter_var_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object\"\n )\n self.write(py_func)\n elif t.value.id == self._output_message_var:\n self.write('FLAMEGPU->message_out.')\n if t.attr in self.fgpu_output_msg_funcs:\n 
self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object\"\n )\n self.write(py_func)\n elif t.value.id == 'math':\n self.write(t.attr)\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(f'static_cast<{self.numpytypes[t.attr]}>')\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n elif t.value.id in self._locals:\n self.write(f'{t.value.id}.{t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifier not supported\")\n elif isinstance(t.value, ast.Call):\n self.dispatchMemberFunction(t.value.func, t.value)\n if t.value.call_type != 'MacroEnvironment':\n self.RaiseError(t, f'Function call {t.attr} is not supported')\n if not t.attr in self.fgpu_env_macro_funcs:\n self.RaiseError(t,\n f'Function {t.attr} is not a valid macro environment function'\n )\n self.write('(')\n self._CallArguments(t.value)\n self.write(')')\n self.write(f'.{t.attr}')\n else:\n self.RaiseError(t, 'Unsupported function call syntax')\n\n def _Module(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Interactive(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Expression(self, tree):\n self.dispatch(tree.body)\n\n def _Expr(self, tree):\n \"\"\"\n Same as a standard python expression but ends with semicolon\n \"\"\"\n if isinstance(tree.value, ast.Constant):\n if isinstance(tree.value.value, str):\n return\n elif isinstance(tree.value, ast.Str):\n return\n self.fill()\n self.dispatch(tree.value)\n self.write(';')\n\n def _NamedExpr(self, tree):\n \"\"\"\n No such concept in C++. Standard assignment can be used in any location.\n \"\"\"\n self.write('(')\n self.dispatch(tree.target)\n self.write(' = ')\n self.dispatch(tree.value)\n self.write(')')\n\n def _Import(self, t):\n self.RaiseError(t, 'Importing of modules not supported')\n\n def _ImportFrom(self, t):\n self.RaiseError(t, 'Importing of modules not supported')\n\n def _Assign(self, t):\n \"\"\"\n Assignment will use the auto type to define a variable at first use else will perform standard assignment.\n Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.\n Multiple assignment is supported by cpp but not in the translator neither is assignment to complex expressions which are valid python syntax.\n \"\"\"\n if len(t.targets) > 1:\n self.RaiseError(t, 'Assignment to multiple targets not supported')\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t,\n 'Assignment to complex expressions not supported')\n self.fill()\n if t.targets[0].id not in self._locals:\n self.write('auto ')\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(' = ')\n self.dispatch(t.value)\n self.write(';')\n\n def _AugAssign(self, t):\n \"\"\"\n Similar to assignment in terms of restrictions. E.g. 
Allow only single named variable assignments.\n Also requires the named variable to already exist in scope.\n \"\"\"\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t,\n 'Augmented assignment to complex expressions not supported')\n if t.target.id not in self._locals:\n self.RaiseError(t,\n 'Augmented assignment not permitted on variables not already assigned previously'\n )\n self.fill()\n self.dispatch(t.target)\n self.write(' ' + self.binop[t.op.__class__.__name__] + '= ')\n self.dispatch(t.value)\n self.write(';')\n\n def _AnnAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t,\n 'Augmented assignment to complex expressions not supported')\n self.fill()\n self.dispatchType(t.annotation)\n self.write(' ')\n self.dispatch(t.target)\n if t.value:\n self.write(' = ')\n self.dispatch(t.value)\n self.write(';')\n <mask token>\n <mask token>\n\n def _Break(self, t):\n self.fill('break;')\n\n def _Continue(self, t):\n self.fill('continue;')\n\n def _Delete(self, t):\n self.RaiseError(t, 'Deletion not supported')\n\n def _Assert(self, t):\n \"\"\"\n cassert does exist but probably not required in FGPU functions and unclear if supported by jitfy\n \"\"\"\n self.RaiseError(t, 'Assert not supported')\n\n def _Exec(self, t):\n self.RaiseError(t, 'Exec not supported')\n\n def _Print(self, t):\n \"\"\"\n This is old school python printing so no need to support\n \"\"\"\n self.RaiseError(t, 'Print not supported')\n\n def _Global(self, t):\n self.RaiseError(t, \"Use of 'global' not supported\")\n\n def _Nonlocal(self, t):\n self.RaiseError(t, \"Use of 'nonlocal' not supported\")\n <mask token>\n\n def _Yield(self, t):\n self.RaiseError(t, 'Yield not supported')\n\n def _YieldFrom(self, t):\n self.RaiseError(t, 'Yield from not supported')\n\n def _Raise(self, t):\n \"\"\"\n Exceptions are obviously supported in cpp but not in CUDA device code\n \"\"\"\n self.RaiseError(t, 'Exception raising not supported')\n\n def _Try(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _TryExcept(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _TryFinally(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _ExceptHandler(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _ClassDef(self, t):\n self.RaiseError(t, 'Class definitions not supported')\n\n def _FunctionDef(self, t):\n \"\"\"\n Checks the decorators of the function definition which must be either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'.\n Each is then processed in a different way using a specific dispatcher.\n Function calls are actually checked and only permitted (or user defined) function calls are supported.\n \"\"\"\n self.write('\\n')\n if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0],\n ast.Attribute):\n self.RaiseError(t,\n \"Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\"\n )\n if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0\n ].value.id == 'pyflamegpu':\n if getattr(t, 'returns', False):\n self.RaiseWarning(t,\n \"Function definition return type not supported on 'pyflamegpu.agent_function'\"\n )\n self.fill(f'FLAMEGPU_AGENT_FUNCTION({t.name}, ')\n self.dispatchFGPUFunctionArgs(t)\n self.write(')')\n elif t.decorator_list[0\n ].attr == 'device_function' and t.decorator_list[0\n ].value.id == 'pyflamegpu':\n 
self.fill(f'FLAMEGPU_DEVICE_FUNCTION ')\n if t.returns:\n self.dispatchType(t.returns)\n else:\n self.write('void')\n self.write(f' {t.name}(')\n self.dispatchFGPUDeviceFunctionArgs(t)\n self.write(')')\n self._device_functions.append(t.name)\n elif t.decorator_list[0\n ].attr == 'agent_function_condition' and t.decorator_list[0\n ].value.id == 'pyflamegpu':\n if not hasattr(t, 'returns'):\n self.RaiseError(t,\n \"Agent function conditions must have a 'bool' return type specified as a return type annotation\"\n )\n if not isinstance(t.returns, ast.Name):\n self.RaiseError(t,\n \"Agent function conditions return type must be 'bool'\")\n if t.returns.id != 'bool':\n self.RaiseError(t,\n \"Agent function conditions return type must be 'bool'\")\n if t.args.args:\n self.RaiseWarning(t,\n 'Agent function conditions do not support arguments. These will be discarded.'\n )\n self.fill(f'FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})')\n else:\n self.RaiseError(t,\n \"Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\"\n )\n self.enter()\n self.dispatch(t.body)\n self.leave()\n\n def _AsyncFunctionDef(self, t):\n self.RaiseError(t, 'Async functions not supported')\n\n def _For(self, t):\n \"\"\"\n Two types of for loop are supported. Either;\n 1) Message for loop in which case the format requires an iterator using the named pyflamegpu function argument of 'message_in'\n 2) A range based for loop with 1 to 3 arguments which is converted into a c style loop\n \"\"\"\n if isinstance(t.iter, ast.Name):\n if t.iter.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration using 'message_in' iterator\"\n )\n elif t.orelse:\n self.RaiseError(t, 'For else not supported')\n elif isinstance(t.iter, ast.Call):\n if isinstance(t.iter.func, ast.Name):\n if t.iter.func.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n elif t.iter.func.id == 'range':\n if len(t.iter.args) == 1:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=0;')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 2:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 3:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('+=')\n self.dispatch(t.iter.args[2])\n self.write(')')\n else:\n self.RaiseError(t,\n \"Range based for loops requires use of 'range' function with arguments and not keywords\"\n )\n self.enter()\n self.dispatch(t.body)\n self.leave()\n else:\n self.RaiseError(t,\n \"Range based for loops only support calls to the 'range' function\"\n )\n elif isinstance(t.iter.func, ast.Attribute):\n if t.iter.func.value.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n 'Range based for loops only support calling members of message input variable'\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message 
iteration or use of 'range'\"\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n\n def _AsyncFor(self, t):\n self.RaiseError(t, 'Async for not supported')\n\n def _If(self, t):\n \"\"\"\n Fairly straightforward translation to if, else if, else format\n \"\"\"\n self.fill('if (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0],\n ast.If):\n t = t.orelse[0]\n self.fill('else if (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.fill('else')\n self.enter()\n self.dispatch(t.orelse)\n self.leave()\n\n def _While(self, t):\n \"\"\"\n Straightforward translation to c style while loop\n \"\"\"\n self.fill('while (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, 'While else not supported')\n\n def _With(self, t):\n self.RaiseError(t, 'With not supported')\n\n def _AsyncWith(self, t):\n self.RaiseError(t, 'Async with not supported')\n\n def _Bytes(self, t):\n self.RaiseError(t, 'Byte strings and Bytes function not supported')\n <mask token>\n\n def _JoinedStr(self, t):\n self.RaiseError(t, 'Joined strings not supported')\n <mask token>\n\n def _fstring_JoinedStr(self, t, write):\n self.RaiseError(t, 'F strings not supported')\n <mask token>\n\n def _fstring_Constant(self, t, write):\n self.RaiseError(t, 'F strings not supported')\n\n def _fstring_FormattedValue(self, t, write):\n self.RaiseError(t, 'F strings not supported')\n\n def _Name(self, t):\n \"\"\"\n Everything ends up as a Name once it is an identifier\n \"\"\"\n self.write(t.id)\n\n def _NameConstant(self, t):\n if t.value == None:\n self.write(0)\n elif t.value:\n self.write('true')\n else:\n self.write('false')\n\n def _Repr(self, t):\n self.RaiseError(t, 'Repr not supported')\n\n def _Constant(self, t):\n \"\"\"\n Restrict most types of constant except for numeric types and constant strings\n Picks up some obvious conversions such as None and Bools\n \"\"\"\n value = t.value\n if isinstance(value, tuple):\n self.RaiseError(t, 'Tuples not supported')\n if isinstance(value, dict):\n self.RaiseError(t, 'Dictionaries not supported')\n if isinstance(value, list):\n self.RaiseError(t, 'Lists not supported')\n elif value is Ellipsis:\n self.RaiseError(t, 'Ellipsis not supported')\n elif isinstance(value, str):\n self.write(f'\"{value}\"')\n elif isinstance(value, (bytes, bytearray)):\n self.RaiseError(t, 'Byte strings and Bytes function not supported')\n elif isinstance(value, bool):\n if value:\n self.write('true')\n else:\n self.write('false')\n elif value == None:\n self.write(0)\n else:\n self.write(repr(value))\n\n def _Num(self, t):\n self.write(repr(t.n))\n\n def _List(self, t):\n self.RaiseError(t, 'Lists not supported')\n\n def _ListComp(self, t):\n self.RaiseError(t, 'List comprehension not supported')\n\n def _GeneratorExp(self, t):\n self.RaiseError(t, 'Generator expressions not supported')\n\n def _SetComp(self, t):\n self.RaiseError(t, 'Set comprehension not supported')\n\n def _DictComp(self, t):\n self.RaiseError(t, 'Dictionary comprehension not supported')\n\n def _comprehension(self, t):\n self.RaiseError(t, 'Comprehension not supported')\n\n def _IfExp(self, t):\n \"\"\"\n Equivalent to a ternary operator\n \"\"\"\n self.dispatch(t.test)\n self.write(' ? 
')\n self.dispatch(t.body)\n self.write(' : ')\n self.dispatch(t.orelse)\n <mask token>\n\n def _Dict(self, t):\n self.RaiseError(t, 'Dictionaries not supported')\n\n def _Tuple(self, t):\n self.RaiseError(t, 'Tuples not supported')\n <mask token>\n <mask token>\n <mask token>\n\n def _BinOp(self, t):\n \"\"\"\n Python style pow and floordiv are not supported so translate to a function call.\n No matrix mul support.\n \"\"\"\n op_name = t.op.__class__.__name__\n if op_name == 'Pow':\n self.write('pow(')\n self.dispatch(t.left)\n self.write(', ')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'FloorDiv':\n self.write('floor(')\n self.dispatch(t.left)\n self.write('/')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'MatMult':\n self.RaiseError(t, 'Matrix multiplier operator not supported')\n else:\n self.write('(')\n self.dispatch(t.left)\n self.write(' ' + self.binop[op_name] + ' ')\n self.dispatch(t.right)\n self.write(')')\n <mask token>\n\n def _Compare(self, t):\n self.dispatch(t.left)\n for o, e in zip(t.ops, t.comparators):\n if o.__class__.__name__ == 'In' or o.__class__.__name__ == 'NotIn':\n self.RaiseError(t, 'In and NotIn operators not supported')\n self.write(' ' + self.cmpops[o.__class__.__name__] + ' ')\n self.dispatch(e)\n <mask token>\n <mask token>\n\n def _Attribute(self, t):\n \"\"\"\n A very limited set of attributes are supported so these are fully evaluated here. Other places where attribute type expressions may occur will also evaluate them fully rather than recursively call this function.\n Attributes supported are only;\n * pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. This will be translated into a namespace member.\n * math.constant - Any supported math constants are translated to C definition versions\n \"\"\"\n func_dict = None\n if isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n if t.attr in self.fgpu_attrs:\n self.write('flamegpu::')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Attribute '{t.attr}' does not exist in pyflamegpu object\"\n )\n elif t.value.id == 'math':\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, 'Unsupported attribute')\n\n def _CallArguments(self, t):\n comma = False\n for e in t.args:\n if comma:\n self.write(', ')\n else:\n comma = True\n self.dispatch(e)\n if len(t.keywords):\n self.RaiseWarning(t, 'Keyword argument not supported. Ignored.')\n if sys.version_info[:2] < (3, 5):\n if t.starargs:\n self.RaiseWarning(t, 'Starargs not supported. Ignored.')\n if t.kwargs:\n self.RaiseWarning(t, 'Kwargs not supported. 
Ignored.')\n <mask token>\n <mask token>\n\n def _Starred(self, t):\n self.RaiseError(t, 'Starred values not supported')\n\n def _Ellipsis(self, t):\n self.RaiseError(t, 'Ellipsis values not supported')\n\n def _Index(self, t):\n self.RaiseError(t, 'Index values not supported')\n\n def _Slice(self, t):\n self.RaiseError(t, 'Slicing values not supported')\n <mask token>\n\n def _arg(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, 'Arguments should already have been processed')\n\n def _arguments(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, 'Arguments should already have been processed')\n\n def _keyword(self, t):\n self.RaiseError(t, 'Keywords are not supported')\n\n def _Lambda(self, t):\n self.RaiseError(t, 'Lambda is not supported')\n\n def _alias(self, t):\n self.RaiseError(t, 'Aliasing is not supported')\n\n def _withitem(self, t):\n self.RaiseError(t, 'With not supported')\n",
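The _For dispatcher in the record above maps Python range() loops onto C-style for loops. The standalone sketch below mirrors just that mapping; render_range_for is a hypothetical helper written for illustration (the real generator dispatches AST nodes rather than pre-rendered strings), and the output strings match the tokens _For emits.

# Mirrors the three range() cases in _For: range(stop), range(start, stop)
# and range(start, stop, step) each become a C-style for loop header.
def render_range_for(target, args):
    if len(args) == 1:
        return f"for (int {target}=0;{target}<{args[0]};{target}++)"
    if len(args) == 2:
        return f"for (int {target}={args[0]};{target}<{args[1]};{target}++)"
    if len(args) == 3:
        return f"for (int {target}={args[0]};{target}<{args[1]};{target}+={args[2]})"
    raise ValueError("range() expects 1 to 3 positional arguments")

assert render_range_for("i", ["10"]) == "for (int i=0;i<10;i++)"
assert render_range_for("i", ["2", "10", "3"]) == "for (int i=2;i<10;i+=3)"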
"step-4": "<mask token>\n\n\nclass CodeGenerator:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, tree, file=sys.stdout):\n \"\"\"CodeGenerator(tree, file=sys.stdout) -> None.\n Print the source for tree to file.\"\"\"\n self.f = file\n self.future_imports = []\n self._indent = 0\n self._locals = ['pyflamegpu']\n self._device_functions = []\n self._message_iterator_var = None\n self._input_message_var = 'message_in'\n self._output_message_var = 'message_out'\n self.dispatch(tree)\n print('', file=self.f)\n self.f.flush()\n\n def _deviceVariableFunctionName(self, tree, permitted_prefixes,\n allow_lengths=True):\n \"\"\"\n Gets the device function name by translating a typed Python version to a templated cpp version.\n Python functions looks like getVariableFloatArray6 and translate to getVariable<float, 6>\n This function will detect and test against a set of known types and also extract the Array length\n This function returns None if the string is invalid in format but only throws an error if the format is correct but the type is invalid.\n \"\"\"\n cpp_func_name = ''\n py_func = tree.attr\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break\n else:\n return None\n if allow_lengths:\n type_and_length = py_func.split('Array')\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree,\n f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n if len(type_and_length) == 1:\n cpp_func_name += f'<{t}>'\n elif len(type_and_length) == 2:\n cpp_func_name += f'<{t}, {type_and_length[1]}>'\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree,\n f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f'<{t}>'\n return cpp_func_name\n\n def fill(self, text=''):\n \"\"\"Indent a piece of text, according to the current indentation level\"\"\"\n self.f.write('\\n' + ' ' * self._indent + text)\n\n def write(self, text):\n \"\"\"Append a piece of text to the current line.\"\"\"\n self.f.write(str(text))\n\n def enter(self):\n \"\"\"Print '{', and increase the indentation.\"\"\"\n self.write('{')\n self._indent += 1\n\n def leave(self):\n \"\"\"Decrease the indentation level and Print '}'\"\"\"\n self._indent -= 1\n self.fill('}')\n\n def dispatch(self, tree):\n \"\"\"Dispatcher function, dispatching tree type T to method _T.\"\"\"\n if isinstance(tree, list):\n for t in tree:\n self.dispatch(t)\n return\n meth = getattr(self, '_' + tree.__class__.__name__)\n meth(tree)\n\n def RaiseWarning(self, tree, str):\n warnings.warn(f'Warning ({tree.lineno}, {tree.col_offset}): {str}')\n\n def RaiseError(self, tree, str):\n raise CodeGenException(\n f'Error ({tree.lineno}, {tree.col_offset}): {str}')\n\n def dispatchMacroEnvFunction(self, tree, tree_parent):\n \"\"\"\n Function will handle a getMacroEnvironment function (assuming it is correctly formatted (by checking with _deviceVariableFunctionName first))\n \"\"\"\n cpp_func_name = 'getMacroProperty'\n py_func = tree.attr\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_type]\n 
cpp_func_name += f'<{t}'\n if not tree_parent.args:\n self.RaiseError(tree,\n f\" Macro environment function '{py_func}' is expected to have some arguments.\"\n )\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n for i in bounds:\n if isinstance(i, ast.Num):\n if not isinstance(i.n, int):\n self.RaiseError(tree,\n f\" Macro environment function argument '{i}' should be an integer value.\"\n )\n cpp_func_name += f', {i.n}'\n else:\n if not isinstance(i, ast.Constant):\n self.RaiseError(tree,\n f\" Macro environment function argument '{i}' should be a constant value (or Num in Python <3.8).\"\n )\n if not isinstance(i.value, int):\n self.RaiseError(tree,\n f\" Macro environment function argument '{i}' should be an integer value.\"\n )\n cpp_func_name += f', {i.value}'\n del tree_parent.args[1:]\n cpp_func_name += '>'\n self.write(cpp_func_name)\n\n def dispatchFGPUFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU function. Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`\n Type hinting is required to translate a type into a FLAME GPU Message type implementation\n \"\"\"\n self._locals = ['pyflamegpu']\n if len(tree.args.args) != 2:\n self.RaiseError(tree,\n 'Expected two FLAME GPU function arguments (input message and output message)'\n )\n if not tree.args.args[0].annotation:\n self.RaiseError(tree.args.args[0],\n 'Message input requires a supported type annotation')\n if not isinstance(tree.args.args[0].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[0].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n input_message_attr = tree.args.args[0\n ].annotation.value.id + '.' + tree.args.args[0].annotation.attr\n if input_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[0],\n 'Message input type annotation not a supported message type')\n self._input_message_var = tree.args.args[0].arg\n self.write(f'flamegpu::{tree.args.args[0].annotation.attr}')\n self.write(', ')\n if not tree.args.args[1].annotation:\n self.RaiseError(tree.args.args[1],\n 'Message output requires a supported type annotation')\n if not isinstance(tree.args.args[1].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n if not isinstance(tree.args.args[1].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation should be an attribute of the form pyflamegpu.MessageType'\n )\n output_message_attr = tree.args.args[1\n ].annotation.value.id + '.' + tree.args.args[1].annotation.attr\n if output_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[1],\n 'Message output type annotation not a supported message type')\n self._output_message_var = tree.args.args[1].arg\n self.write(f'flamegpu::{tree.args.args[1].annotation.attr}')\n\n def dispatchType(self, tree):\n \"\"\"\n There is a limited set of types and formats of type description supported. Types can be either;\n 1) A python built in type of int or float, or\n 2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16\n This function translates and catches unsupported types but does not translate a function call (i.e. 
cast)\n \"\"\"\n if isinstance(tree, ast.Name):\n if tree.id not in self.basic_arg_types:\n self.RaiseError(tree, 'Not a supported type')\n self.write(tree.id)\n elif isinstance(tree, ast.Attribute):\n if not isinstance(tree.value, ast.Name):\n self.RaiseError(tree, 'Not a supported type')\n if not (tree.value.id == 'numpy' or tree.value.id == 'np'):\n self.RaiseError(tree, 'Not a supported type')\n if tree.attr not in self.numpytypes:\n self.RaiseError(tree, 'Not a supported numpy type')\n self.write(self.numpytypes[tree.attr])\n else:\n self.RaiseError(tree, 'Not a supported type')\n\n def dispatchFGPUDeviceFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU device function. Arguments must use type hinting to be translated to cpp.\n \"\"\"\n self._locals = ['pyflamegpu']\n first = True\n annotation = None\n for arg in tree.args.args:\n if not arg.annotation:\n self.RaiseError(arg,\n 'Device function argument requires type annotation')\n if not first:\n self.write(', ')\n self.dispatchType(arg.annotation)\n self.write(f' {arg.arg}')\n self._locals.append(arg.arg)\n first = False\n\n def dispatchMessageIteratorCall(self, tree):\n \"\"\"\n Message iterator call may be a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap())\n Using this function avoids the global call dispatcher, which may accept member function calls to things that are not iterators.\n \"\"\"\n if isinstance(tree.func, ast.Name):\n self.write(f'FLAMEGPU->{tree.func.id}')\n if isinstance(tree.func, ast.Attribute):\n if isinstance(tree.func.value, ast.Name):\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree,\n f\"Message input loop iterator '{tree.func.attr}' is not supported.\"\n )\n self.write(f'FLAMEGPU->{tree.func.value.id}.{tree.func.attr}')\n else:\n self.RaiseError(tree,\n 'Message input loop iterator format incorrect.')\n self.write('(')\n self._CallArguments(tree)\n self.write(')')\n\n def dispatchMessageLoop(self, tree):\n \"\"\"\n This is a special case of a range based for loop in which iterator item returns a const reference to the message.\n Any user specified message value can be used.\n \"\"\"\n self.fill('for (const auto& ')\n self.dispatch(tree.target)\n self.write(' : ')\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(tree,\n f\"Message input loop requires use of '{self._input_message_var}' as iterator.\"\n )\n self.write(f'FLAMEGPU->{self._input_message_var}')\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n else:\n self.RaiseError(tree,\n f'Message input loop iterator in unsupported format')\n self.write(')')\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None\n\n def dispatchMemberFunction(self, t, t_parent):\n \"\"\"\n A very limited set of function calls to members are supported so these are fully evaluated here.\n t_parent is the Call ast object required if the arguments need to be modified (i.e. in the case of macro environment properties)\n Function calls permitted are;\n * pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). 
This will be translated into a typed Cpp call.\n * message_input.function - a call to the message input variable (the name of which is specified in the function definition)\n * msg.function - a call to the message input iterator object variable (the name of which is specified in the message function loop)\n * message_output.function - a call to the message output variable (the name of which is specified in the function definition)\n * pyflamegpu.environment.function - the only nested attribute type. This will be translated into a typed Cpp call.\n * math.function - Any function calls from python `math` are translated to raw function calls. E.g. `math.sin()` becomes `sin()`\n * numpy.type - Any numpy types are translated to static casts\n \"\"\"\n if not hasattr(t, 'value'):\n self.RaiseError(t, f'Function call is in an unsupported format.')\n if isinstance(t.value, ast.Attribute):\n t_parent.call_type = None\n if not isinstance(t.value.value, ast.Name):\n self.RaiseError(t, 'Unknown or unsupported nested attribute')\n if (t.value.value.id == 'pyflamegpu' and t.value.attr ==\n 'environment'):\n self.write('FLAMEGPU->environment.')\n if t.attr in self.fgpu_env_funcs:\n self.write(t.attr)\n elif t.attr.startswith('getProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getProperty'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment property function.\"\n )\n self.write(py_func)\n t_parent.call_type = 'Environment'\n elif t.attr.startswith('getMacroProperty'):\n py_func = self._deviceVariableFunctionName(t, [\n 'getMacroProperty'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.\"\n )\n self.dispatchMacroEnvFunction(t, t_parent)\n t_parent.call_type = 'MacroEnvironment'\n else:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.environment object\"\n )\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'random':\n self.write('FLAMEGPU->random.')\n if t.attr in self.fgpu_rand_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'uniform', 'normal', 'logNormal'], allow_lengths=False)\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.random object\"\n )\n self.write(py_func)\n t_parent.call_type = 'Random'\n elif t.value.value.id == 'pyflamegpu' and t.value.attr == 'agent_out':\n self.write('FLAMEGPU->agent_out.')\n if t.attr in self.fgpu_agent_out_msg_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu.agent_out object\"\n )\n self.write(py_func)\n t_parent.call_type = 'AgentOut'\n else:\n self.RaiseError(t,\n f'Unknown or unsupported nested attribute in {t.value.value.id}'\n )\n elif isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n self.write('FLAMEGPU->')\n if t.attr in self.fgpu_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable', 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in pyflamegpu object\"\n )\n self.write(py_func)\n elif t.value.id == self._input_message_var:\n if t.attr in self.fgpu_input_msg_funcs:\n self.write(f'FLAMEGPU->{self._input_message_var}.')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Message input variable 
'{self._input_message_var}' does not have a supported function '{t.attr}'\"\n )\n elif self._message_iterator_var and t.value.id == self._message_iterator_var:\n self.write(f'{self._message_iterator_var}.')\n if t.attr in self.fgpu_input_msg_iter_var_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'getVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object\"\n )\n self.write(py_func)\n elif t.value.id == self._output_message_var:\n self.write('FLAMEGPU->message_out.')\n if t.attr in self.fgpu_output_msg_funcs:\n self.write(t.attr)\n else:\n py_func = self._deviceVariableFunctionName(t, [\n 'setVariable'])\n if not py_func:\n self.RaiseError(t,\n f\"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object\"\n )\n self.write(py_func)\n elif t.value.id == 'math':\n self.write(t.attr)\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(f'static_cast<{self.numpytypes[t.attr]}>')\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n elif t.value.id in self._locals:\n self.write(f'{t.value.id}.{t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifier not supported\")\n elif isinstance(t.value, ast.Call):\n self.dispatchMemberFunction(t.value.func, t.value)\n if t.value.call_type != 'MacroEnvironment':\n self.RaiseError(t, f'Function call {t.attr} is not supported')\n if not t.attr in self.fgpu_env_macro_funcs:\n self.RaiseError(t,\n f'Function {t.attr} is not a valid macro environment function'\n )\n self.write('(')\n self._CallArguments(t.value)\n self.write(')')\n self.write(f'.{t.attr}')\n else:\n self.RaiseError(t, 'Unsupported function call syntax')\n\n def _Module(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Interactive(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Expression(self, tree):\n self.dispatch(tree.body)\n\n def _Expr(self, tree):\n \"\"\"\n Same as a standard python expression but ends with semicolon\n \"\"\"\n if isinstance(tree.value, ast.Constant):\n if isinstance(tree.value.value, str):\n return\n elif isinstance(tree.value, ast.Str):\n return\n self.fill()\n self.dispatch(tree.value)\n self.write(';')\n\n def _NamedExpr(self, tree):\n \"\"\"\n No such concept in C++. 
Standard assignment can be used in any location.\n \"\"\"\n self.write('(')\n self.dispatch(tree.target)\n self.write(' = ')\n self.dispatch(tree.value)\n self.write(')')\n\n def _Import(self, t):\n self.RaiseError(t, 'Importing of modules not supported')\n\n def _ImportFrom(self, t):\n self.RaiseError(t, 'Importing of modules not supported')\n\n def _Assign(self, t):\n \"\"\"\n Assignment will use the auto type to define a variable at first use else will perform standard assignment.\n Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.\n Multiple assignment is supported by cpp but not in the translator neither is assignment to complex expressions which are valid python syntax.\n \"\"\"\n if len(t.targets) > 1:\n self.RaiseError(t, 'Assignment to multiple targets not supported')\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t,\n 'Assignment to complex expressions not supported')\n self.fill()\n if t.targets[0].id not in self._locals:\n self.write('auto ')\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(' = ')\n self.dispatch(t.value)\n self.write(';')\n\n def _AugAssign(self, t):\n \"\"\"\n Similar to assignment in terms of restrictions. E.g. Allow only single named variable assignments.\n Also requires the named variable to already exist in scope.\n \"\"\"\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t,\n 'Augmented assignment to complex expressions not supported')\n if t.target.id not in self._locals:\n self.RaiseError(t,\n 'Augmented assignment not permitted on variables not already assigned previously'\n )\n self.fill()\n self.dispatch(t.target)\n self.write(' ' + self.binop[t.op.__class__.__name__] + '= ')\n self.dispatch(t.value)\n self.write(';')\n\n def _AnnAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t,\n 'Augmented assignment to complex expressions not supported')\n self.fill()\n self.dispatchType(t.annotation)\n self.write(' ')\n self.dispatch(t.target)\n if t.value:\n self.write(' = ')\n self.dispatch(t.value)\n self.write(';')\n\n def _Return(self, t):\n \"\"\"\n Standard cpp like return with semicolon.\n \"\"\"\n self.fill('return')\n if t.value:\n self.write(' ')\n self.dispatch(t.value)\n self.write(';')\n\n def _Pass(self, t):\n self.fill(';')\n\n def _Break(self, t):\n self.fill('break;')\n\n def _Continue(self, t):\n self.fill('continue;')\n\n def _Delete(self, t):\n self.RaiseError(t, 'Deletion not supported')\n\n def _Assert(self, t):\n \"\"\"\n cassert does exist but probably not required in FGPU functions and unclear if supported by jitfy\n \"\"\"\n self.RaiseError(t, 'Assert not supported')\n\n def _Exec(self, t):\n self.RaiseError(t, 'Exec not supported')\n\n def _Print(self, t):\n \"\"\"\n This is old school python printing so no need to support\n \"\"\"\n self.RaiseError(t, 'Print not supported')\n\n def _Global(self, t):\n self.RaiseError(t, \"Use of 'global' not supported\")\n\n def _Nonlocal(self, t):\n self.RaiseError(t, \"Use of 'nonlocal' not supported\")\n\n def _Await(self, t):\n self.RaiseError(t, 'Await not supported')\n\n def _Yield(self, t):\n self.RaiseError(t, 'Yield not supported')\n\n def _YieldFrom(self, t):\n self.RaiseError(t, 'Yield from not supported')\n\n def _Raise(self, t):\n \"\"\"\n Exceptions are obviously supported in cpp but not in CUDA device code\n \"\"\"\n self.RaiseError(t, 'Exception raising not supported')\n\n def _Try(self, t):\n self.RaiseError(t, 'Exceptions not 
supported')\n\n def _TryExcept(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _TryFinally(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _ExceptHandler(self, t):\n self.RaiseError(t, 'Exceptions not supported')\n\n def _ClassDef(self, t):\n self.RaiseError(t, 'Class definitions not supported')\n\n def _FunctionDef(self, t):\n \"\"\"\n Checks the decorators of the function definition much must be either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'.\n Each is then processed in a different way using a specific dispatcher.\n Function calls are actually checked and only permitted (or user defined) function calls are supported.\n \"\"\"\n self.write('\\n')\n if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0],\n ast.Attribute):\n self.RaiseError(t,\n \"Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\"\n )\n if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0\n ].value.id == 'pyflamegpu':\n if getattr(t, 'returns', False):\n self.RaiseWarning(t,\n \"Function definition return type not supported on 'pyflamegpu.agent_function'\"\n )\n self.fill(f'FLAMEGPU_AGENT_FUNCTION({t.name}, ')\n self.dispatchFGPUFunctionArgs(t)\n self.write(')')\n elif t.decorator_list[0\n ].attr == 'device_function' and t.decorator_list[0\n ].value.id == 'pyflamegpu':\n self.fill(f'FLAMEGPU_DEVICE_FUNCTION ')\n if t.returns:\n self.dispatchType(t.returns)\n else:\n self.write('void')\n self.write(f' {t.name}(')\n self.dispatchFGPUDeviceFunctionArgs(t)\n self.write(')')\n self._device_functions.append(t.name)\n elif t.decorator_list[0\n ].attr == 'agent_function_condition' and t.decorator_list[0\n ].value.id == 'pyflamegpu':\n if not hasattr(t, 'returns'):\n self.RaiseError(t,\n \"Agent function conditions must have a 'bool' return type specified as a return type annotation\"\n )\n if not isinstance(t.returns, ast.Name):\n self.RaiseError(t,\n \"Agent function conditions return type must be 'bool'\")\n if t.returns.id is not 'bool':\n self.RaiseError(t,\n \"Agent function conditions return type must be 'bool'\")\n if t.args.args:\n self.RaiseWarning(t,\n 'Agent function conditions does not support arguments. These will be discarded.'\n )\n self.fill(f'FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})')\n else:\n self.RaiseError(t,\n \"Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\"\n )\n self.enter()\n self.dispatch(t.body)\n self.leave()\n\n def _AsyncFunctionDef(self, t):\n self.RaiseError(t, 'Async functions not supported')\n\n def _For(self, t):\n \"\"\"\n Two type for for loop are supported. 
Either;\n 1) Message for loop in which case the format requires a iterator using the named pyflamegpu function argument of 'message_in'\n 2) A range based for loop with 1 to 3 arguments which is converted into a c style loop\n \"\"\"\n if isinstance(t.iter, ast.Name):\n if t.iter.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration using 'message_in' iterator\"\n )\n elif t.orelse:\n self.RaiseError(t, 'For else not supported')\n elif isinstance(t.iter, ast.Call):\n if isinstance(t.iter.func, ast.Name):\n if t.iter.func.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n elif t.iter.func.id == 'range':\n if len(t.iter.args) == 1:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=0;')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 2:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('++)')\n elif len(t.iter.args) == 3:\n self.fill(f'for (int ')\n self.dispatch(t.target)\n self.write('=')\n self.dispatch(t.iter.args[0])\n self.write(';')\n self.dispatch(t.target)\n self.write('<')\n self.dispatch(t.iter.args[1])\n self.write(';')\n self.dispatch(t.target)\n self.write('+=')\n self.dispatch(t.iter.args[2])\n self.write(')')\n else:\n self.RaiseError(t,\n \"Range based for loops requires use of 'range' function with arguments and not keywords\"\n )\n self.enter()\n self.dispatch(t.body)\n self.leave()\n else:\n self.RaiseError(t,\n \"Range based for loops only support calls to the 'range' function\"\n )\n elif isinstance(t.iter.func, ast.Attribute):\n if t.iter.func.value.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t,\n 'Range based for loops only support calling members of message input variable'\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n else:\n self.RaiseError(t,\n \"Range based for loops only support message iteration or use of 'range'\"\n )\n\n def _AsyncFor(self, t):\n self.RaiseError(t, 'Async for not supported')\n\n def _If(self, t):\n \"\"\"\n Fairly straightforward translation to if, else if, else format\n \"\"\"\n self.fill('if (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0],\n ast.If):\n t = t.orelse[0]\n self.fill('else if (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.fill('else')\n self.enter()\n self.dispatch(t.orelse)\n self.leave()\n\n def _While(self, t):\n \"\"\"\n Straightforward translation to c style while loop\n \"\"\"\n self.fill('while (')\n self.dispatch(t.test)\n self.write(')')\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, 'While else not supported')\n\n def _With(self, t):\n self.RaiseError(t, 'With not supported')\n\n def _AsyncWith(self, t):\n self.RaiseError(t, 'Async with not supported')\n\n def _Bytes(self, t):\n self.RaiseError(t, 'Byte strings and Bytes function not supported')\n\n def _Str(self, tree):\n self.write(f'\"{tree.s}\"')\n\n def _JoinedStr(self, t):\n 
self.RaiseError(t, 'Joined strings not supported')\n <mask token>\n\n def _fstring_JoinedStr(self, t, write):\n self.RaiseError(t, 'F strings not supported')\n <mask token>\n\n def _fstring_Constant(self, t, write):\n self.RaiseError(t, 'F strings not supported')\n\n def _fstring_FormattedValue(self, t, write):\n self.RaiseError(t, 'F strings not supported')\n\n def _Name(self, t):\n \"\"\"\n Everything ends up as a Name once it is an identifier\n \"\"\"\n self.write(t.id)\n\n def _NameConstant(self, t):\n if t.value == None:\n self.write(0)\n elif t.value:\n self.write('true')\n else:\n self.write('false')\n\n def _Repr(self, t):\n self.RaiseError(t, 'Repr not supported')\n\n def _Constant(self, t):\n \"\"\"\n Restrict most types of constant except for numeric types and constant strings\n Picks up some obvious conversions such as None and Bools\n \"\"\"\n value = t.value\n if isinstance(value, tuple):\n self.RaiseError(t, 'Tuples not supported')\n if isinstance(value, dict):\n self.RaiseError(t, 'Dictionaries not supported')\n if isinstance(value, list):\n self.RaiseError(t, 'Lists not supported')\n elif value is Ellipsis:\n self.RaiseError(t, 'Ellipsis not supported')\n elif isinstance(value, str):\n self.write(f'\"{value}\"')\n elif isinstance(value, (bytes, bytearray)):\n self.RaiseError(t, 'Byte strings and Bytes function not supported')\n elif isinstance(value, bool):\n if value:\n self.write('true')\n else:\n self.write('false')\n elif value == None:\n self.write(0)\n else:\n self.write(repr(value))\n\n def _Num(self, t):\n self.write(repr(t.n))\n\n def _List(self, t):\n self.RaiseError(t, 'Lists not supported')\n\n def _ListComp(self, t):\n self.RaiseError(t, 'List comprehension not supported')\n\n def _GeneratorExp(self, t):\n self.RaiseError(t, 'Generator expressions not supported')\n\n def _SetComp(self, t):\n self.RaiseError(t, 'Set comprehension not supported')\n\n def _DictComp(self, t):\n self.RaiseError(t, 'Dictionary comprehension not supported')\n\n def _comprehension(self, t):\n self.RaiseError(t, 'Comprehension not supported')\n\n def _IfExp(self, t):\n \"\"\"\n Equivalent to a ternary operator\n \"\"\"\n self.dispatch(t.test)\n self.write(' ? 
')\n self.dispatch(t.body)\n self.write(' : ')\n self.dispatch(t.orelse)\n\n def _Set(self, t):\n self.RaiseError(t, 'Sets not supported')\n\n def _Dict(self, t):\n self.RaiseError(t, 'Dictionaries not supported')\n\n def _Tuple(self, t):\n self.RaiseError(t, 'Tuples not supported')\n <mask token>\n\n def _UnaryOp(self, t):\n \"\"\"\n Translate to C equivalent opertaors\n \"\"\"\n self.write('(')\n self.write(self.unop[t.op.__class__.__name__])\n self.dispatch(t.operand)\n self.write(')')\n <mask token>\n\n def _BinOp(self, t):\n \"\"\"\n Python style pow and floordiv are not supported so translate to a function call.\n No matrix mul support.\n \"\"\"\n op_name = t.op.__class__.__name__\n if op_name == 'Pow':\n self.write('pow(')\n self.dispatch(t.left)\n self.write(', ')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'FloorDiv':\n self.write('floor(')\n self.dispatch(t.left)\n self.write('/')\n self.dispatch(t.right)\n self.write(')')\n elif op_name == 'MatMult':\n self.RaiseError(t, 'Matrix multiplier operator not supported')\n else:\n self.write('(')\n self.dispatch(t.left)\n self.write(' ' + self.binop[op_name] + ' ')\n self.dispatch(t.right)\n self.write(')')\n <mask token>\n\n def _Compare(self, t):\n self.dispatch(t.left)\n for o, e in zip(t.ops, t.comparators):\n if o.__class__.__name__ == 'In' or o.__class__.__name__ == 'NotIn':\n self.RaiseError(t, 'In and NotIn operators not supported')\n self.write(' ' + self.cmpops[o.__class__.__name__] + ' ')\n self.dispatch(e)\n <mask token>\n\n def _BoolOp(self, t):\n \"\"\"\n Translate to logical and/or operators in C\n \"\"\"\n self.write('(')\n s = ' %s ' % self.boolops[t.op.__class__]\n interleave(lambda : self.write(s), self.dispatch, t.values)\n self.write(')')\n\n def _Attribute(self, t):\n \"\"\"\n A very limited set of attributes are supported so these are fully evaluated here. Other places where attribute type expressions may occur will also evaluate them fully rather than recursively call this function.\n Attributes supported are only;\n * pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. This will be translated into a namespace member.\n * math.constant - Any supported math constants are translated to C definition versions\n \"\"\"\n func_dict = None\n if isinstance(t.value, ast.Name):\n if t.value.id == 'pyflamegpu':\n if t.attr in self.fgpu_attrs:\n self.write('flamegpu::')\n self.write(t.attr)\n else:\n self.RaiseError(t,\n f\"Attribute '{t.attr}' does not exist in pyflamegpu object\"\n )\n elif t.value.id == 'math':\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n elif t.value.id == 'numpy' or t.value.id == 'np':\n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else:\n self.RaiseError(t, f'Unsupported numpy type {t.attr}')\n else:\n self.RaiseError(t,\n f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, 'Unsupported attribute')\n\n def _CallArguments(self, t):\n comma = False\n for e in t.args:\n if comma:\n self.write(', ')\n else:\n comma = True\n self.dispatch(e)\n if len(t.keywords):\n self.RaiseWarning(t, 'Keyword argument not supported. Ignored.')\n if sys.version_info[:2] < (3, 5):\n if t.starargs:\n self.RaiseWarning(t, 'Starargs not supported. Ignored.')\n if t.kwargs:\n self.RaiseWarning(t, 'Kwargs not supported. 
Ignored.')\n\n def _Call(self, t):\n \"\"\"\n Some basic checks are undertaken on calls to ensure that the function being called is either a builtin or defined device function.\n A special dispatcher is required \n \"\"\"\n funcs = self._device_functions + self.pythonbuiltins + [self.\n _input_message_var]\n if isinstance(t.func, ast.Name):\n if t.func.id not in funcs:\n self.RaiseWarning(t,\n 'Function call is not a defined FLAME GPU device function or a supported python built in.'\n )\n self.dispatch(t.func)\n elif isinstance(t.func, ast.Lambda):\n self.dispatch(t.func)\n else:\n self.dispatchMemberFunction(t.func, t)\n self.write('(')\n self._CallArguments(t)\n self.write(')')\n\n def _Subscript(self, t):\n \"\"\"\n Arrays are not supported but subscript allows accessing array like variables which is required for macro environment properties (e.g. a[0][1][2])\n Obvious limitation is no slicing type syntax (e.g. a[:2])\n \"\"\"\n self.dispatch(t.value)\n self.write('[')\n self.dispatch(t.slice)\n self.write(']')\n\n def _Starred(self, t):\n self.RaiseError(t, 'Starred values not supported')\n\n def _Ellipsis(self, t):\n self.RaiseError(t, 'Ellipsis values not supported')\n\n def _Index(self, t):\n self.RaiseError(t, 'Index values not supported')\n\n def _Slice(self, t):\n self.RaiseError(t, 'Slicing values not supported')\n\n def _ExtSlice(self, t):\n self.RaiseError(t, 'ExtSlice values not supported')\n\n def _arg(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, 'Arguments should already have been processed')\n\n def _arguments(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, 'Arguments should already have been processed')\n\n def _keyword(self, t):\n self.RaiseError(t, 'Keywords are not supported')\n\n def _Lambda(self, t):\n self.RaiseError(t, 'Lambda is not supported')\n\n def _alias(self, t):\n self.RaiseError(t, 'Aliasing is not supported')\n\n def _withitem(self, t):\n self.RaiseError(t, 'With not supported')\n",
"step-5": "from __future__ import print_function, unicode_literals\nimport sys\nimport ast\nimport os\nimport tokenize\nimport warnings\nfrom io import StringIO\n\ndef interleave(inter, f, seq):\n \"\"\"Call f on each item in seq, calling inter() in between.\n \"\"\"\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)\n\nclass CodeGenException(Exception):\n \"\"\" Generic exception for errors raised in code generation \"\"\"\n pass\n\nclass CodeGenerator:\n \"\"\"Methods in this class recursively traverse an AST and\n output source code for the abstract syntax; original formatting\n is disregarded. \"\"\"\n\n # represents built in functions\n pythonbuiltins = [\"abs\", \"float\", \"int\"]\n \n # basic types\n basic_arg_types = ['float', 'int']\n \n # supported math constansts \n mathconsts = {\"pi\": \"M_PI\",\n \"e\": \"M_E\",\n \"inf\": \"INFINITY\",\n \"nan\": \"NAN\",\n }\n \n # support for most numpy types except complex numbers and float>64bit\n numpytypes = {\"byte\": \"char\",\n \"ubyte\": \"unsigned char\",\n \"short\": \"short\",\n \"ushort\": \"unsigned short\",\n \"intc\": \"int\",\n \"uintc\": \"unsigned int\",\n \"uint\": \"unisgned int\",\n \"longlong\": \"long long\",\n \"ulonglong\": \"unsigned long long\",\n \"half\": \"half\", # cuda supported \n \"single\": \"float\",\n \"double\": \"double\",\n \"longdouble\": \"long double\",\n \"bool_\": \"bool\",\n \"bool8\": \"bool\",\n # sized aliases\n \"int_\": \"long\",\n \"int8\": \"int8_t\",\n \"int16\": \"int16_t\",\n \"int32\": \"int32_t\",\n \"int64\": \"int64_t\",\n \"intp\": \"intptr_t\",\n \"uint_\": \"long\",\n \"uint8\": \"uint8_t\",\n \"uint16\": \"uint16_t\",\n \"uint32\": \"uint32_t\",\n \"uint64\": \"uint64_t\",\n \"uintp\": \"uintptr_t\",\n \"float_\": \"float\",\n \"float16\": \"half\",\n \"float32\": \"float\",\n \"float64\": \"double\"\n }\n \n # getVariableType and setVariableType functions are added dynamically \n fgpu_funcs = [ \"getID\", \"getStepCounter\", \"getIndex\" ] \n fgpu_attrs = [\"ALIVE\", \"DEAD\"]\n fgpu_input_msg_funcs = [\"radius\", \"at\"] # functions that can be called on message_in that do NOT return iterators\n fgpu_input_msg_iter_funcs = [\"wrap\", \"vn\", \"vn_wrap\"] # functions that can be called on message_in that do return iterators\n fgpu_input_msg_iter_var_funcs = [\"getIndex\", \"getVirtualX\", \"getVirtualY\", \"getVirtualZ\"] \n fgpu_output_msg_funcs = [\"setLocation\", \"setKey\", \"setIndex\"]\n fgpu_agent_out_msg_funcs = [\"getID\"]\n fgpu_env_funcs = [\"containsProperty\", \"containsMacroProperty\"]\n fgpu_env_macro_funcs = [\"exchange\", \"CAS\", \"min\", \"max\"]\n fgpu_rand_funcs = []\n fgpu_message_types = [\"pyflamegpu.MessageNone\", \"pyflamegpu.MessageBruteForce\", \"pyflamegpu.MessageBucket\", \"pyflamegpu.MessageSpatial2D\", \"pyflamegpu.MessageSpatial3D\", \"pyflamegpu.MessageArray\", \"pyflamegpu.MessageArray2D\", \"pyflamegpu.MessageArray3D\"]\n \n _fgpu_types = {\"Float\": \"float\",\n \"Double\": \"double\",\n \"Int\": \"int\",\n \"UInt\": \"unsigned int\",\n \"Int8\": \"int_8\",\n \"UInt8\": \"uint_8\",\n \"Char\": \"char\",\n \"UChar\": \"unsigned char\",\n \"Int16\": \"int_16\",\n \"UInt16\": \"uint_16\",\n \"Int32\": \"int_32\",\n \"UInt32\": \"uint_32\",\n \"Int64\": \"int_64\",\n \"UInt64\": \"uint_64\"\n }\n\n\n def __init__(self, tree, file = sys.stdout):\n \"\"\"CodeGenerator(tree, file=sys.stdout) -> None.\n Print the source for tree to file.\"\"\"\n self.f = file\n self.future_imports = 
[]\n self._indent = 0\n # dict of locals used to determine if variable already exists in assignments\n self._locals = [\"pyflamegpu\"]\n self._device_functions = []\n self._message_iterator_var = None # default\n self._input_message_var = 'message_in' # default\n self._output_message_var = 'message_out' # default\n self.dispatch(tree)\n print(\"\", file=self.f)\n self.f.flush()\n \n \n def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):\n \"\"\"\n Gets the device function name by translating a typed Python version to a templated cpp version.\n Python functions looks like getVariableFloatArray6 and translate to getVariable<float, 6>\n This function will detect and test against a set of known types and also extract the Array length\n This function returns None if the string is invalid in format but only throws an error if the format is correct but the type is invalid.\n \"\"\"\n cpp_func_name = \"\"\n py_func = tree.attr\n # extract function name start\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break # dont allow the else\n else:\n return None\n # check type and lengths\n if allow_lengths:\n #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) \n type_and_length = py_func.split(\"Array\")\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree, f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n # generate template args\n if (len(type_and_length) == 1):\n cpp_func_name += f\"<{t}>\"\n elif (len(type_and_length) == 2):\n cpp_func_name += f\"<{t}, {type_and_length[1]}>\"\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f\"<{t}>\"\n # return \n return cpp_func_name\n \n\n def fill(self, text = \"\"):\n \"Indent a piece of text, according to the current indentation level\"\n self.f.write(\"\\n\"+\" \"*self._indent + text)\n\n def write(self, text):\n \"Append a piece of text to the current line.\"\n self.f.write(str(text))\n\n def enter(self):\n \"Print '{', and increase the indentation.\"\n self.write(\"{\")\n self._indent += 1\n\n def leave(self):\n \"Decrease the indentation level and Print '}'\"\n self._indent -= 1\n self.fill(\"}\")\n\n def dispatch(self, tree):\n \"Dispatcher function, dispatching tree type T to method _T.\"\n if isinstance(tree, list):\n for t in tree:\n self.dispatch(t)\n return\n meth = getattr(self, \"_\"+tree.__class__.__name__)\n meth(tree)\n \n def RaiseWarning(self, tree, str):\n warnings.warn(f\"Warning ({tree.lineno}, {tree.col_offset}): {str}\")\n \n def RaiseError(self, tree, str):\n raise CodeGenException(f\"Error ({tree.lineno}, {tree.col_offset}): {str}\")\n\n ############### Cutsom Unparsing methods ###############\n # These are special versions of the ast unparsing #\n # dispatch functions. 
#\n ########################################################\n \n def dispatchMacroEnvFunction(self, tree, tree_parent):\n \"\"\"\n Function will handle a getMacroEnvironment function (assuming it is correctly formatted (by checking with _deviceVariableFunctionName first))\n \"\"\"\n cpp_func_name = \"getMacroProperty\"\n py_func = tree.attr\n # extract type from function name\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n # get cpp type\n t = self._fgpu_types[py_type]\n cpp_func_name += f\"<{t}\"\n # mess with the parent to extract (and remove arguments so they dont end up in the argument list)\n if not tree_parent.args :\n self.RaiseError(tree, f\" Macro environment function '{py_func}' is expected to have some arguments.\")\n # if more than one arg then the rest are bounds to translate\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n # process bounds by appending to cpp function template arguments\n for i in bounds:\n if isinstance(i, ast.Num): # num required for python 3.7\n if not isinstance(i.n, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.n}\"\n else: # all Python > 3.7 \n if not isinstance(i, ast.Constant):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\")\n if not isinstance(i.value, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.value}\"\n # remove bounds from argument list (in place)\n del tree_parent.args[1:]\n cpp_func_name += \">\"\n self.write(cpp_func_name)\n\n def dispatchFGPUFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU function. 
Arguments must have syntax of `message_in: MessageInType, message_out: MessageOutType`\n Type hinting is required to translate a type into a FLAME GPU Message type implementation\n \"\"\"\n # reset the locals variable stack\n self._locals = [\"pyflamegpu\"]\n if len(tree.args.args) != 2:\n self.RaiseError(tree, \"Expected two FLAME GPU function arguments (input message and output message)\")\n # input message\n if not tree.args.args[0].annotation:\n self.RaiseError(tree.args.args[0], \"Message input requires a supported type annotation\")\n if not isinstance(tree.args.args[0].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[0], \"Message input type annotation should be an attribute of the form pyflamegpu.MessageType\")\n if not isinstance(tree.args.args[0].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[0], \"Message output type annotation should be an attribute of the form pyflamegpu.MessageType\")\n input_message_attr = tree.args.args[0].annotation.value.id + \".\" + tree.args.args[0].annotation.attr\n if input_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[0], \"Message input type annotation not a supported message type\")\n self._input_message_var = tree.args.args[0].arg # store the message input variable name\n self.write(f\"flamegpu::{tree.args.args[0].annotation.attr}\") # requires namespace\n self.write(\", \")\n # output message\n if not tree.args.args[1].annotation:\n self.RaiseError(tree.args.args[1], \"Message output requires a supported type annotation\")\n if not isinstance(tree.args.args[1].annotation, ast.Attribute):\n self.RaiseError(tree.args.args[1], \"Message output type annotation should be an attribute of the form pyflamegpu.MessageType\")\n if not isinstance(tree.args.args[1].annotation.value, ast.Name):\n self.RaiseError(tree.args.args[1], \"Message output type annotation should be an attribute of the form pyflamegpu.MessageType\")\n output_message_attr = tree.args.args[1].annotation.value.id + \".\" + tree.args.args[1].annotation.attr\n if output_message_attr not in self.fgpu_message_types:\n self.RaiseError(tree.args.args[1], \"Message output type annotation not a supported message type\")\n self._output_message_var = tree.args.args[1].arg # store the message output variable name\n self.write(f\"flamegpu::{tree.args.args[1].annotation.attr}\") # requires namespace\n \n def dispatchType(self, tree):\n \"\"\"\n There is a limited set of types and formats of type description supported. Types can be either;\n 1) A python built in type of int or float, or\n 2) A subset of numpy types prefixed with either numpy or np. e.g. np.int16\n This function translates and a catches unsupported types but does not translate a function call (i.e. cast)\n \"\"\"\n if isinstance(tree, ast.Name):\n if tree.id not in self.basic_arg_types:\n self.RaiseError(tree, \"Not a supported type\")\n self.write(tree.id)\n elif isinstance(tree, ast.Attribute):\n if not isinstance(tree.value, ast.Name) :\n self.RaiseError(tree, \"Not a supported type\")\n if not (tree.value.id == \"numpy\" or tree.value.id == \"np\"):\n self.RaiseError(tree, \"Not a supported type\")\n if tree.attr not in self.numpytypes:\n self.RaiseError(tree, \"Not a supported numpy type\")\n self.write(self.numpytypes[tree.attr])\n else:\n self.RaiseError(tree, \"Not a supported type\")\n \n def dispatchFGPUDeviceFunctionArgs(self, tree):\n \"\"\"\n Handles arguments for a FLAME GPU device function. 
Arguments must use type hinting to be translated to cpp.\n \"\"\"\n # reset the locals variable stack\n self._locals = [\"pyflamegpu\"]\n # input message\n first = True\n annotation = None\n for arg in tree.args.args:\n # ensure that there is a type annotation\n if not arg.annotation:\n self.RaiseError(arg, \"Device function argument requires type annotation\")\n # comma if not first\n if not first:\n self.write(\", \")\n self.dispatchType(arg.annotation)\n self.write(f\" {arg.arg}\")\n # add arg to local variable stack\n self._locals.append(arg.arg)\n first = False \n \n def dispatchMessageIteratorCall(self, tree):\n \"\"\"\n Message iterator call maybe a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap())\n Using this function avoid using the global call one which may accept member function calls to things that are not iterators.\n \"\"\"\n # simple case not a member function just an iterator with arguments\n if isinstance(tree.func, ast.Name):\n self.write(f\"FLAMEGPU->{tree.func.id}\")\n if isinstance(tree.func, ast.Attribute) :\n if isinstance(tree.func.value, ast.Name):\n # check that the iterator is supported\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree, f\"Message input loop iterator '{tree.func.attr}' is not supported.\")\n self.write(f\"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}\")\n else:\n self.RaiseError(tree, \"Message input loop iterator format incorrect.\")\n\n # handle function arguments \n self.write(\"(\")\n self._CallArguments(tree)\n self.write(\")\")\n\n def dispatchMessageLoop(self, tree):\n \"\"\"\n This is a special case of a range based for loop in which iterator item returns a const referecne to the message.\n Any user specified message value can be used.\n \"\"\"\n self.fill(\"for (const auto& \")\n self.dispatch(tree.target)\n self.write(\" : \")\n # if simple message iterator\n if isinstance(tree.iter, ast.Name):\n if not tree.iter.id == self._input_message_var:\n self.RaiseError(t, f\"Message input loop requires use of '{self._input_message_var}' as iterator.\")\n # write with prefix\n self.write(f\"FLAMEGPU->{self._input_message_var}\")\n # if it is a call then handle the different cases\n elif isinstance(tree.iter, ast.Call):\n self.dispatchMessageIteratorCall(tree.iter)\n #otherwise not supported\n else :\n self.RaiseError(tree, f\"Message input loop iterator in unsupported format\")\n self.write(\")\")\n self._message_iterator_var = tree.target.id\n self.enter()\n self.dispatch(tree.body)\n self.leave()\n self._message_iterator_var = None\n \n def dispatchMemberFunction(self, t, t_parent):\n \"\"\"\n A very limited set of function calls to members are supported so these are fully evaluated here.\n t_parent is the Call ast object required if the argument need to be modified (i.e. in the case of macro environment properties)\n Function calls permitted are;\n * pyflamegpu.function - a supported function call. e.g. pyflamegpu.getVariableFloat(). This will be translated into a typed Cpp call.\n * message_input.function - a call to the message input variable (the name of which is specified in the function definition)\n * msg.function - a call to the message input iterator objection variable (the name of which is specified in the message function loop)\n * message_output.function - a call to the message output variable (the name of which is specified in the function definition)\n * pyflamegpu.environment.function - the only nested attribute type. 
This will be translated into a typed Cpp call.\n * math.function - Any function calls from python `math` are translated to calls raw function calls. E.g. `math.sin()` becomes `sin()`\n * numpy.type - Any numpy types are translated to static casts\n \"\"\"\n # it could be possible that the Call object has no value property e.g. a()()\n if not hasattr(t, \"value\"):\n self.RaiseError(t, f\"Function call is in an unsupported format.\")\n\n # Nested member functions (e.g. x.y.z())\n if isinstance(t.value, ast.Attribute):\n # store some information about the source of this function call in parent as this may be useful for validation in whatever has called this function\n t_parent.call_type = None\n # only nested attribute type is environment\n if not isinstance(t.value.value, ast.Name):\n self.RaiseError(t, \"Unknown or unsupported nested attribute\")\n # pyflamegpu.environment\n if t.value.value.id == \"pyflamegpu\" and t.value.attr == \"environment\":\n # check it is a supported environment function\n self.write(\"FLAMEGPU->environment.\")\n if t.attr in self.fgpu_env_funcs: \n # proceed\n self.write(t.attr)\n else: \n # simple getProperty type function\n if t.attr.startswith('getProperty') :\n # possible getter setter type function\n py_func = self._deviceVariableFunctionName(t, [\"getProperty\"])\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' is not a supported pyflamegpu.environment property function.\")\n # write the getProperty type function\n self.write(py_func)\n t_parent.call_type = \"Environment\"\n # need to catch case of getMacroProperty as arguments need to be translated into template parameters in cpp (and py_func can be ignored)\n elif t.attr.startswith(\"getMacroProperty\"):\n # possible getter setter type function (Note: getMacroProperty only supports a subset of types but type checking is not performed. This is best left to the compiler.)\n # no not permit lengths (e.g. 
Float4) as these will be passed as arguments\n py_func = self._deviceVariableFunctionName(t, [\"getMacroProperty\"], allow_lengths=False)\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' is not a supported pyflamegpu.environment macro property function.\")\n # handle case\n self.dispatchMacroEnvFunction(t, t_parent)\n t_parent.call_type = \"MacroEnvironment\"\n else:\n self.RaiseError(t, f\"Function '{t.attr}' does not exist in pyflamegpu.environment object\")\n \n # pyflamegpu.random\n elif t.value.value.id == \"pyflamegpu\" and t.value.attr == \"random\":\n # check it is a supported random function\n self.write(\"FLAMEGPU->random.\")\n if t.attr in self.fgpu_rand_funcs: \n # proceed\n self.write(t.attr)\n else: \n # possible getter setter type function\n py_func = self._deviceVariableFunctionName(t, [\"uniform\", \"normal\", \"logNormal\"], allow_lengths=False)\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' does not exist in pyflamegpu.random object\")\n # proceed\n self.write(py_func) \n t_parent.call_type = \"Random\"\n elif t.value.value.id == \"pyflamegpu\" and t.value.attr == \"agent_out\":\n # check it is a supported agent_out function\n self.write(\"FLAMEGPU->agent_out.\")\n if t.attr in self.fgpu_agent_out_msg_funcs: \n # proceed\n self.write(t.attr)\n else: \n # possible getter setter type function\n py_func = self._deviceVariableFunctionName(t, [\"setVariable\"])\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' does not exist in pyflamegpu.agent_out object\")\n # proceed\n self.write(py_func)\n t_parent.call_type = \"AgentOut\"\n else:\n self.RaiseError(t, f\"Unknown or unsupported nested attribute in {t.value.value.id}\")\n # Non nested member functions (e.g. x.y())\n elif isinstance(t.value, ast.Name):\n # pyflamegpu singleton\n if t.value.id == \"pyflamegpu\":\n # check for legit FGPU function calls \n self.write(\"FLAMEGPU->\")\n if t.attr in self.fgpu_funcs:\n # proceed\n self.write(t.attr)\n else:\n # possible getter setter type function\n py_func = self._deviceVariableFunctionName(t, [\"getVariable\", \"setVariable\"])\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' does not exist in pyflamegpu object\")\n # proceed\n self.write(py_func)\n\n # message_in function using whatever variable was named in function declaration (e.g radius)\n elif t.value.id == self._input_message_var:\n # only process functions on message_in that are not iterators\n if t.attr in self.fgpu_input_msg_funcs:\n self.write(f\"FLAMEGPU->{self._input_message_var}.\")\n self.write(t.attr) \n else:\n self.RaiseError(t, f\"Message input variable '{self._input_message_var}' does not have a supported function '{t.attr}'\") \n\n # message input iterator arg\n elif self._message_iterator_var and t.value.id == self._message_iterator_var:\n self.write(f\"{self._message_iterator_var}.\")\n # check for legit FGPU function calls and translate\n if t.attr in self.fgpu_input_msg_iter_var_funcs: \n # proceed\n self.write(t.attr)\n else:\n # possible getter setter type function\n py_func = self._deviceVariableFunctionName(t, [\"getVariable\"])\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' does not exist in '{self._message_iterator_var}' message input iterable object\")\n # proceed\n self.write(py_func)\n \n # message output arg\n elif t.value.id == self._output_message_var:\n # check for legit FGPU function calls and translate\n self.write(\"FLAMEGPU->message_out.\")\n if t.attr in self.fgpu_output_msg_funcs: \n # proceed\n self.write(t.attr)\n 
else:\n # possible getter setter type function\n py_func = self._deviceVariableFunctionName(t, [\"setVariable\"])\n if not py_func:\n self.RaiseError(t, f\"Function '{t.attr}' does not exist in '{self._output_message_var}' message output object\")\n # proceed\n self.write(py_func)\n \n \n \n # math functions (try them in raw function call format) or constants\n elif t.value.id == \"math\":\n self.write(t.attr)\n # numpy types\n elif t.value.id == \"numpy\" or t.value.id == \"np\":\n if t.attr in self.numpytypes:\n self.write(f\"static_cast<{self.numpytypes[t.attr]}>\")\n else: \n self.RaiseError(t, f\"Unsupported numpy type {t.attr}\")\n # allow any call on any locals (too many cases to enforce without type checking)\n elif t.value.id in self._locals:\n self.write(f\"{t.value.id}.{t.attr}\")\n else:\n self.RaiseError(t, f\"Global '{t.value.id}' identifier not supported\")\n # Call is a very nested situation which can occur only on macro environment properties. E.g. 'pyflamegpu.environment.getMacroPropertyInt('a').exchange(10)'\n elif isinstance(t.value, ast.Call):\n # handle the call by recursively calling this function to do the depth first execution of pyflamegpu.environment.getMacroPropertyInt('a')\n self.dispatchMemberFunction(t.value.func, t.value)\n # check that the handler was actually for macro environment \n if t.value.call_type != \"MacroEnvironment\" :\n self.RaiseError(t, f\"Function call {t.attr} is not supported\")\n # now append the outer call by making sure the thing been called is a valid macro env function\n if not t.attr in self.fgpu_env_macro_funcs:\n self.RaiseError(t, f\"Function {t.attr} is not a valid macro environment function\")\n # write inner call args\n self.write(\"(\")\n self._CallArguments(t.value)\n self.write(\")\")\n # write outer function (call args will be completed by _Call)\n self.write(f\".{t.attr}\")\n \n \n else:\n self.RaiseError(t, \"Unsupported function call syntax\")\n \n ############### Unparsing methods ######################\n # There should be one method per concrete grammar type #\n # Constructors should be grouped by sum type. Ideally, #\n # this would follow the order in the grammar, but #\n # currently doesn't. #\n ########################################################\n\n def _Module(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Interactive(self, tree):\n for stmt in tree.body:\n self.dispatch(stmt)\n\n def _Expression(self, tree):\n self.dispatch(tree.body)\n\n # stmt\n def _Expr(self, tree):\n \"\"\"\n Same as a standard python expression but ends with semicolon\n \"\"\"\n # Catch odd case of multi line strings and doc strings which are Expr with a Constant string type value\n if isinstance(tree.value, ast.Constant):\n if isinstance(tree.value.value, str):\n return\n # catch special case of Python 3.7 Where doc string is a Str and not a Constant\n elif isinstance(tree.value, ast.Str):\n return \n # otherwise treat like a normal expression\n self.fill()\n self.dispatch(tree.value)\n self.write(\";\")\n\n def _NamedExpr(self, tree):\n \"\"\"\n No such concept in C++. 
Standard assignment can be used in any location.\n \"\"\"\n self.write(\"(\")\n self.dispatch(tree.target)\n self.write(\" = \")\n self.dispatch(tree.value)\n self.write(\")\")\n\n def _Import(self, t):\n self.RaiseError(t, \"Importing of modules not supported\")\n\n def _ImportFrom(self, t):\n self.RaiseError(t, \"Importing of modules not supported\")\n\n def _Assign(self, t):\n \"\"\"\n Assignment will use the auto type to define a variable at first use else will perform standard assignment.\n Note: There is no ability to create `const` variables unless this is inferred from the assignment expression.\n Multiple assignment is supported by cpp but not in the translator neither is assignment to complex expressions which are valid python syntax.\n \"\"\"\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")\n\n def _AugAssign(self, t):\n \"\"\"\n Similar to assignment in terms of restrictions. E.g. Allow only single named variable assignments.\n Also requires the named variable to already exist in scope.\n \"\"\"\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")\n\n def _AnnAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n self.fill()\n self.dispatchType(t.annotation)\n self.write(\" \")\n self.dispatch(t.target)\n if t.value:\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")\n\n def _Return(self, t):\n \"\"\"\n Standard cpp like return with semicolon.\n \"\"\"\n self.fill(\"return\")\n if t.value:\n self.write(\" \")\n self.dispatch(t.value)\n self.write(\";\")\n\n def _Pass(self, t):\n self.fill(\";\")\n\n def _Break(self, t):\n self.fill(\"break;\")\n\n def _Continue(self, t):\n self.fill(\"continue;\")\n\n def _Delete(self, t):\n self.RaiseError(t, \"Deletion not supported\")\n\n def _Assert(self, t):\n \"\"\"\n cassert does exist but probably not required in FGPU functions and unclear if supported by jitfy\n \"\"\"\n self.RaiseError(t, \"Assert not supported\")\n\n def _Exec(self, t):\n self.RaiseError(t, \"Exec not supported\")\n\n def _Print(self, t):\n \"\"\"\n This is old school python printing so no need to support\n \"\"\"\n self.RaiseError(t, \"Print not supported\")\n \n def _Global(self, t):\n self.RaiseError(t, \"Use of 'global' not supported\")\n\n def _Nonlocal(self, t):\n self.RaiseError(t, \"Use of 'nonlocal' not supported\")\n\n def _Await(self, t):\n self.RaiseError(t, \"Await not supported\")\n\n def _Yield(self, t):\n self.RaiseError(t, \"Yield not supported\")\n\n def _YieldFrom(self, t):\n self.RaiseError(t, \"Yield from not supported\")\n\n def _Raise(self, t):\n \"\"\"\n Exceptions are obviously supported in cpp but not in CUDA device code\n \"\"\"\n 
self.RaiseError(t, \"Exception raising not supported\")\n\n def _Try(self, t):\n self.RaiseError(t, \"Exceptions not supported\")\n\n def _TryExcept(self, t):\n self.RaiseError(t, \"Exceptions not supported\")\n\n def _TryFinally(self, t): \n self.RaiseError(t, \"Exceptions not supported\")\n\n def _ExceptHandler(self, t):\n self.RaiseError(t, \"Exceptions not supported\")\n\n def _ClassDef(self, t):\n self.RaiseError(t, \"Class definitions not supported\")\n\n def _FunctionDef(self, t):\n \"\"\"\n Checks the decorators of the function definition much must be either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'.\n Each is then processed in a different way using a specific dispatcher.\n Function calls are actually checked and only permitted (or user defined) function calls are supported.\n \"\"\"\n self.write(\"\\n\")\n # check decorators\n if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):\n self.RaiseError(t, \"Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\") \n # FLAMEGPU_AGENT_FUNCTION\n if t.decorator_list[0].attr == 'agent_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n if getattr(t, \"returns\", False):\n self.RaiseWarning(t, \"Function definition return type not supported on 'pyflamegpu.agent_function'\")\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION({t.name}, \")\n self.dispatchFGPUFunctionArgs(t)\n self.write(\")\")\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'device_function' and t.decorator_list[0].value.id == 'pyflamegpu':\n self.fill(f\"FLAMEGPU_DEVICE_FUNCTION \")\n if t.returns:\n self.dispatchType(t.returns)\n else:\n self.write(\"void\")\n self.write(f\" {t.name}(\")\n self.dispatchFGPUDeviceFunctionArgs(t)\n self.write(\")\")\n # add to list of defined functions that can be called\n self._device_functions.append(t.name)\n # FLAMEGPU_DEVICE_FUNCTION\n elif t.decorator_list[0].attr == 'agent_function_condition' and t.decorator_list[0].value.id == 'pyflamegpu':\n # check for return annotation\n if not hasattr(t, \"returns\"):\n self.RaiseError(t, \"Agent function conditions must have a 'bool' return type specified as a return type annotation\")\n # check for return annotation type\n if not isinstance(t.returns, ast.Name):\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n if t.returns.id is not 'bool':\n self.RaiseError(t, \"Agent function conditions return type must be 'bool'\")\n # check to ensure no arguments (discard any with a warning)\n if t.args.args:\n self.RaiseWarning(t, \"Agent function conditions does not support arguments. These will be discarded.\")\n # write the agent function macro\n self.fill(f\"FLAMEGPU_AGENT_FUNCTION_CONDITION({t.name})\")\n else:\n self.RaiseError(t, \"Function definition uses an unsupported decorator. Must use either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n\n def _AsyncFunctionDef(self, t):\n self.RaiseError(t, \"Async functions not supported\")\n\n def _For(self, t):\n \"\"\"\n Two type for for loop are supported. 
Either;\n 1) Message for loop in which case the format requires a iterator using the named pyflamegpu function argument of 'message_in'\n 2) A range based for loop with 1 to 3 arguments which is converted into a c style loop\n \"\"\"\n # if message loop then process differently\n if isinstance(t.iter, ast.Name):\n if t.iter.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t, \"Range based for loops only support message iteration using 'message_in' iterator\")\n # do not support for else\n elif t.orelse:\n self.RaiseError(t, \"For else not supported\")\n # allow calls but only to range function\n elif isinstance(t.iter, ast.Call):\n # simple function call e.g. message_in() or range()\n if isinstance(t.iter.func, ast.Name):\n # catch case of message_input with arguments (e.g. spatial messaging)\n if t.iter.func.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n # otherwise permit only range based for loops\n elif t.iter.func.id == \"range\":\n # switch on different uses of range based on number of arguments\n if len(t.iter.args) == 1:\n self.fill(f\"for (int \")\n self.dispatch(t.target)\n self.write(\"=0;\")\n self.dispatch(t.target)\n self.write(\"<\")\n self.dispatch(t.iter.args[0])\n self.write(\";\")\n self.dispatch(t.target)\n self.write(\"++)\")\n elif len(t.iter.args) == 2:\n self.fill(f\"for (int \")\n self.dispatch(t.target)\n self.write(\"=\")\n self.dispatch(t.iter.args[0])\n self.write(\";\")\n self.dispatch(t.target)\n self.write(\"<\")\n self.dispatch(t.iter.args[1])\n self.write(\";\")\n self.dispatch(t.target)\n self.write(\"++)\")\n elif len(t.iter.args) == 3:\n self.fill(f\"for (int \")\n self.dispatch(t.target)\n self.write(\"=\")\n self.dispatch(t.iter.args[0])\n self.write(\";\")\n self.dispatch(t.target)\n self.write(\"<\")\n self.dispatch(t.iter.args[1])\n self.write(\";\")\n self.dispatch(t.target)\n self.write(\"+=\")\n self.dispatch(t.iter.args[2])\n self.write(\")\")\n else:\n self.RaiseError(t, \"Range based for loops requires use of 'range' function with arguments and not keywords\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n else:\n self.RaiseError(t, \"Range based for loops only support calls to the 'range' function\")\n # member function call can only be on message_in.func() type call.\n elif isinstance(t.iter.func, ast.Attribute):\n # must be an attribute (e.g. 
calling a member of message_in)\n if t.iter.func.value.id == self._input_message_var:\n self.dispatchMessageLoop(t)\n else:\n self.RaiseError(t, \"Range based for loops only support calling members of message input variable\")\n else:\n self.RaiseError(t, \"Range based for loops only support message iteration or use of 'range'\")\n else:\n self.RaiseError(t, \"Range based for loops only support message iteration or use of 'range'\")\n\n def _AsyncFor(self, t):\n self.RaiseError(t, \"Async for not supported\") \n\n def _If(self, t):\n \"\"\"\n Fairly straightforward translation to if, else if, else format\n \"\"\"\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()\n\n def _While(self, t):\n \"\"\"\n Straightforward translation to c style while loop\n \"\"\"\n self.fill(\"while (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, \"While else not supported\")\n\n def _With(self, t):\n self.RaiseError(t, \"With not supported\")\n\n def _AsyncWith(self, t):\n self.RaiseError(t, \"Async with not supported\")\n\n # expr\n def _Bytes(self, t):\n self.RaiseError(t, \"Byte strings and Bytes function not supported\")\n\n def _Str(self, tree):\n # force writing in double quotes\n self.write(f'\"{tree.s}\"')\n \n def _JoinedStr(self, t):\n self.RaiseError(t, \"Joined strings not supported\")\n\n def _FormattedValue(self, t):\n self.RaiseError(t, \"Formatted strings not supported\")\n\n def _fstring_JoinedStr(self, t, write):\n self.RaiseError(t, \"F strings not supported\")\n\n def _fstring_Str(self, t, write):\n self.RaiseError(t, \"F strings not supported\")\n\n def _fstring_Constant(self, t, write):\n self.RaiseError(t, \"F strings not supported\")\n\n def _fstring_FormattedValue(self, t, write):\n self.RaiseError(t, \"F strings not supported\")\n\n def _Name(self, t):\n \"\"\"\n Everything ends up as a Name once it is an identifier\n \"\"\"\n self.write(t.id)\n\n def _NameConstant(self, t):\n # Required only for Python 3.7\n if t.value == None:\n self.write(0)\n elif t.value:\n self.write(\"true\")\n else:\n self.write(\"false\")\n\n def _Repr(self, t):\n self.RaiseError(t, \"Repr not supported\")\n \n def _Constant(self, t):\n \"\"\"\n Restrict most types of constant except for numeric types and constant strings\n Picks up some obvious conversions such as None and Bools\n \"\"\"\n value = t.value\n if isinstance(value, tuple):\n self.RaiseError(t, \"Tuples not supported\")\n if isinstance(value, dict):\n self.RaiseError(t, \"Dictionaries not supported\")\n if isinstance(value, list):\n self.RaiseError(t, \"Lists not supported\")\n elif value is Ellipsis: # instead of `...` for Py2 compatibility\n self.RaiseError(t, \"Ellipsis not supported\")\n elif isinstance(value, str): \n self.write(f'\"{value}\"')\n elif isinstance(value, (bytes, bytearray)): # reject bytes strings e.g. 
b'123' \n self.RaiseError(t, \"Byte strings and Bytes function not supported\")\n elif isinstance(value, bool):\n if value:\n self.write(\"true\")\n else:\n self.write(\"false\")\n elif value == None:\n self.write(0)\n else:\n self.write(repr(value))\n\n def _Num(self, t):\n self.write(repr(t.n))\n\n def _List(self, t):\n self.RaiseError(t, \"Lists not supported\")\n\n def _ListComp(self, t):\n self.RaiseError(t, \"List comprehension not supported\")\n\n def _GeneratorExp(self, t):\n self.RaiseError(t, \"Generator expressions not supported\")\n\n def _SetComp(self, t):\n self.RaiseError(t, \"Set comprehension not supported\")\n\n def _DictComp(self, t):\n self.RaiseError(t, \"Dictionary comprehension not supported\")\n\n def _comprehension(self, t):\n self.RaiseError(t, \"Comprehension not supported\")\n\n def _IfExp(self, t):\n \"\"\"\n Equivalent to a ternary operator\n \"\"\"\n self.dispatch(t.test)\n self.write(\" ? \")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)\n\n\n def _Set(self, t):\n self.RaiseError(t, \"Sets not supported\")\n\n def _Dict(self, t):\n self.RaiseError(t, \"Dictionaries not supported\")\n\n def _Tuple(self, t):\n self.RaiseError(t, \"Tuples not supported\")\n\n unop = {\"Invert\":\"~\", \"Not\": \"!\", \"UAdd\":\"+\", \"USub\":\"-\"}\n def _UnaryOp(self, t):\n \"\"\"\n Translate to C equivalent opertaors\n \"\"\"\n self.write(\"(\")\n self.write(self.unop[t.op.__class__.__name__])\n self.dispatch(t.operand)\n self.write(\")\")\n\n binop = { \"Add\":\"+\", \"Sub\":\"-\", \"Mult\":\"*\", \"MatMult\":\"@\", \"Div\":\"/\", \"Mod\":\"%\",\n \"LShift\":\"<<\", \"RShift\":\">>\", \"BitOr\":\"|\", \"BitXor\":\"^\", \"BitAnd\":\"&\",\n \"FloorDiv\":\"//\", \"Pow\": \"**\"}\n def _BinOp(self, t):\n \"\"\"\n Python style pow and floordiv are not supported so translate to a function call.\n No matrix mul support.\n \"\"\"\n op_name = t.op.__class__.__name__\n # translate pow into function call (no float version)\n if op_name == \"Pow\":\n self.write(\"pow(\")\n self.dispatch(t.left)\n self.write(\", \")\n self.dispatch(t.right)\n self.write(\")\")\n # translate floor div into function call (no float version)\n elif op_name == \"FloorDiv\":\n self.write(\"floor(\")\n self.dispatch(t.left)\n self.write(\"/\")\n self.dispatch(t.right)\n self.write(\")\")\n elif op_name == \"MatMult\":\n self.RaiseError(t, \"Matrix multiplier operator not supported\")\n else:\n self.write(\"(\")\n self.dispatch(t.left)\n self.write(\" \" + self.binop[op_name] + \" \")\n self.dispatch(t.right)\n self.write(\")\")\n\n cmpops = {\"Eq\":\"==\", \"NotEq\":\"!=\", \"Lt\":\"<\", \"LtE\":\"<=\", \"Gt\":\">\", \"GtE\":\">=\",\n \"Is\":\"==\", \"IsNot\":\"!=\", \"In\":\"in\", \"NotIn\":\"not in\"}\n def _Compare(self, t):\n self.dispatch(t.left)\n for o, e in zip(t.ops, t.comparators):\n # detect list ops\n if o.__class__.__name__ == \"In\" or o.__class__.__name__ == \"NotIn\":\n self.RaiseError(t, \"In and NotIn operators not supported\")\n self.write(\" \" + self.cmpops[o.__class__.__name__] + \" \")\n self.dispatch(e)\n\n boolops = {ast.And: '&&', ast.Or: '||'}\n def _BoolOp(self, t):\n \"\"\"\n Translate to logical and/or operators in C\n \"\"\"\n self.write(\"(\")\n s = \" %s \" % self.boolops[t.op.__class__]\n interleave(lambda: self.write(s), self.dispatch, t.values)\n self.write(\")\")\n \n def _Attribute(self,t):\n \"\"\"\n A very limited set of attributes are supported so these are fully evaluated here. 
Other places where attribute type expressions may occur will also evaluate them fully rather than recursively call this function.\n Attributes supported are only;\n * pyflamegpu.attribute - a supported attribute e.g. pyflamegpu.ALIVE. This will be translated into a namespace member.\n * math.constant - Any supported math constants are translated to C definition versions\n \"\"\"\n # Only a limited set of globals supported\n func_dict = None\n \n # pyflamegpu singleton\n if isinstance(t.value, ast.Name):\n if t.value.id == \"pyflamegpu\":\n if t.attr in self.fgpu_attrs:\n # proceed\n self.write(\"flamegpu::\")\n self.write(t.attr)\n else:\n self.RaiseError(t, f\"Attribute '{t.attr}' does not exist in pyflamegpu object\")\n # math functions (try them in raw function call format) or constants\n elif t.value.id == \"math\":\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n # numpy types\n elif t.value.id == \"numpy\" or t.value.id == \"np\":\n # not sure how a numpy attribute would be used without function call or type hint but translate anyway \n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else: \n self.RaiseError(t, f\"Unsupported numpy type {t.attr}\")\n else:\n self.RaiseError(t, f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, \"Unsupported attribute\")\n\n def _CallArguments(self, t):\n comma = False\n for e in t.args:\n if comma: self.write(\", \")\n else: comma = True\n self.dispatch(e)\n if len(t.keywords):\n self.RaiseWarning(t, \"Keyword argument not supported. Ignored.\")\n if sys.version_info[:2] < (3, 5):\n if t.starargs:\n self.RaiseWarning(t, \"Starargs not supported. Ignored.\")\n if t.kwargs:\n self.RaiseWarning(t, \"Kwargs not supported. Ignored.\")\n \n def _Call(self, t):\n \"\"\"\n Some basic checks are undertaken on calls to ensure that the function being called is either a builtin or defined device function.\n A special dispatcher is required \n \"\"\"\n # check calls but let attributes check in their own dispatcher\n funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var] # message_input variable is a valid function name as certain message types have arguments on iterator\n if isinstance(t.func, ast.Name):\n if (t.func.id not in funcs):\n self.RaiseWarning(t, \"Function call is not a defined FLAME GPU device function or a supported python built in.\")\n # dispatch even if warning raised\n self.dispatch(t.func)\n elif isinstance(t.func, ast.Lambda):\n self.dispatch(t.func) # not supported\n else:\n # special handler for dispatching member function calls\n # This would otherwise be an attribute\n self.dispatchMemberFunction(t.func, t) \n self.write(\"(\")\n self._CallArguments(t)\n self.write(\")\")\n\n def _Subscript(self, t):\n \"\"\"\n Arrays are not supported but subscript allows accessing array like variables which is required for macro environment properties (e.g. a[0][1][2])\n Obvious limitation is no slicing type syntax (e.g. 
a[:2])\n \"\"\"\n self.dispatch(t.value)\n self.write(\"[\")\n self.dispatch(t.slice)\n self.write(\"]\")\n\n def _Starred(self, t):\n self.RaiseError(t, \"Starred values not supported\")\n\n # slice\n def _Ellipsis(self, t):\n self.RaiseError(t, \"Ellipsis values not supported\")\n\n def _Index(self, t):\n self.RaiseError(t, \"Index values not supported\")\n\n def _Slice(self, t):\n self.RaiseError(t, \"Slicing values not supported\")\n\n def _ExtSlice(self, t):\n self.RaiseError(t, \"ExtSlice values not supported\")\n\n # argument\n def _arg(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, \"Arguments should already have been processed\")\n\n # others\n def _arguments(self, t):\n \"\"\"\n Arguments should be processed by a custom dispatcher and it should not be possible to get here\n \"\"\"\n self.RaiseError(t, \"Arguments should already have been processed\")\n\n def _keyword(self, t):\n self.RaiseError(t, \"Keywords are not supported\")\n\n def _Lambda(self, t):\n self.RaiseError(t, \"Lambda is not supported\")\n\n def _alias(self, t):\n self.RaiseError(t, \"Aliasing is not supported\")\n\n def _withitem(self, t):\n self.RaiseError(t, \"With not supported\")\n",
"step-ids": [
43,
45,
82,
94,
103
]
}
|
[
43,
45,
82,
94,
103
] |
# 938. Range Sum of BST
# Given the root node of a binary search tree, return the sum of values of all nodes with value between L and R (inclusive).
# The binary search tree is guaranteed to have unique values.
# Example 1:
# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
# Output: 32
# Example 2:
# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10
# Output: 23
# Note:
# The number of nodes in the tree is at most 10000.
# The final answer is guaranteed to be less than 2^31.
# class Solution:
# def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
# result = self.cal_sum(root, L, R, 0)
# return result
# def cal_sum(self, root, L, R, result):
# if not root:
# return result
# left = self.cal_sum(root.left, L, R, result)
# right = self.cal_sum(root.right, L, R, result)
# if root.val < L or root.val > R:
# return left + right
# return left + right + root.val
# Better Solution
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
result = self.cal_sum(root, L, R, 0)
return result
def cal_sum(self, root, L, R, result):
if not root:
return result
left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)
right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)
if root.val < L or root.val > R:
return left + right
return left + right + root.val
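# A quick usage sketch for the class above. LeetCode supplies TreeNode at
# judge time, so the tiny stand-in below exists only for this example (when
# run for real it must precede Solution, because the type hints in
# rangeSumBST are evaluated as the class body executes). The tree and the
# expected sum come from Example 1 of the problem statement.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(10,
                TreeNode(5, TreeNode(3), TreeNode(7)),
                TreeNode(15, None, TreeNode(18)))
assert Solution().rangeSumBST(root, 7, 15) == 32  # 7 + 10 + 15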
|
normal
|
{
"blob_id": "8e1de62f2490d2276a834ae1ab0f1958649fa821",
"index": 5503,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n <mask token>\n",
"step-3": "class Solution:\n <mask token>\n\n def cal_sum(self, root, L, R, result):\n if not root:\n return result\n left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)\n right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)\n if root.val < L or root.val > R:\n return left + right\n return left + right + root.val\n",
"step-4": "class Solution:\n\n def rangeSumBST(self, root: TreeNode, L: int, R: int) ->int:\n result = self.cal_sum(root, L, R, 0)\n return result\n\n def cal_sum(self, root, L, R, result):\n if not root:\n return result\n left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)\n right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)\n if root.val < L or root.val > R:\n return left + right\n return left + right + root.val\n",
"step-5": "# 938. Range Sum of BST\n\n\n# Share\n# Given the root node of a binary search tree, return the sum of values of all nodes with value between L and R (inclusive).\n\n# The binary search tree is guaranteed to have unique values.\n\n \n\n# Example 1:\n\n# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15\n# Output: 32\n# Example 2:\n\n# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10\n# Output: 23\n \n\n# Note:\n\n# The number of nodes in the tree is at most 10000.\n# The final answer is guaranteed to be less than 2^31.\n\n# class Solution:\n# def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n \n# result = self.cal_sum(root, L, R, 0)\n \n# return result\n \n# def cal_sum(self, root, L, R, result):\n \n# if not root:\n# return result\n \n# left = self.cal_sum(root.left, L, R, result)\n# right = self.cal_sum(root.right, L, R, result)\n \n# if root.val < L or root.val > R:\n# return left + right\n \n# return left + right + root.val\n\n\n# Better Solution\nclass Solution:\n def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n \n result = self.cal_sum(root, L, R, 0)\n \n return result\n \n def cal_sum(self, root, L, R, result):\n \n if not root:\n return result\n \n \n left = 0 if root.val < L else self.cal_sum(root.left, L, R, result)\n \n right = 0 if root.val > R else self.cal_sum(root.right, L, R, result)\n \n if root.val < L or root.val > R:\n return left + right\n \n return left + right + root.val",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(ports)
for p in ports:
print(p[1])
if 'Arduino' in p[1]:
ser = serial.Serial(port=p[0])
else:
print('No Arduino Device was found connected to the computer')
<|reserved_special_token_0|>
while True:
ct += 1
ret, img = cap.read()
center = [img.shape[0] / 2, img.shape[1] / 2]
faces = face_cascade.detectMultiScale(img, 1.3, 5)
tmp = 0
for x, y, w, h in faces:
tmp += 1
if tmp > 1:
print('too many faces')
else:
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_color = img[y:y + h, x:x + w]
x_d = x + w / 2 - 325 - 73
dis = -0.88 * w + 220
angle = x_d
currentpos = angle
currentdis = dis
currentx_d = x_d
if ct == 1:
lastpos = currentpos
lastdis = currentdis
lastx_d = currentx_d
print(str(int(angle)).encode())
if angle < 0:
ser.write(str(int(angle)).encode())
else:
ser.write(('+' + str(int(angle))).encode())
time.sleep(1)
if lastpos - currentpos < 10 and abs(angle) < 15:
shoot += 1
if shoot > 1:
time.sleep(2)
ser.write(str(10000).encode())
time.sleep(2)
shoot = 0
lastpos = currentpos
lastdis = currentdis
lastx_d = currentx_d
cv2.imshow('img', img)
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ports = list(serial.tools.list_ports.comports())
print(ports)
for p in ports:
print(p[1])
if 'Arduino' in p[1]:
ser = serial.Serial(port=p[0])
else:
print('No Arduino Device was found connected to the computer')
cap = cv2.VideoCapture(1)
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')
lastpos = 0
currentpos = 0
lastdis = 0
currentdis = 0
lastx_d = 0
currentx_d = 0
shoot = 0
ct = 0
while True:
ct += 1
ret, img = cap.read()
center = [img.shape[0] / 2, img.shape[1] / 2]
faces = face_cascade.detectMultiScale(img, 1.3, 5)
tmp = 0
for x, y, w, h in faces:
tmp += 1
if tmp > 1:
print('too many faces')
else:
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_color = img[y:y + h, x:x + w]
x_d = x + w / 2 - 325 - 73
dis = -0.88 * w + 220
angle = x_d
currentpos = angle
currentdis = dis
currentx_d = x_d
if ct == 1:
lastpos = currentpos
lastdis = currentdis
lastx_d = currentx_d
print(str(int(angle)).encode())
if angle < 0:
ser.write(str(int(angle)).encode())
else:
ser.write(('+' + str(int(angle))).encode())
time.sleep(1)
if lastpos - currentpos < 10 and abs(angle) < 15:
shoot += 1
if shoot > 1:
time.sleep(2)
ser.write(str(10000).encode())
time.sleep(2)
shoot = 0
lastpos = currentpos
lastdis = currentdis
lastx_d = currentx_d
cv2.imshow('img', img)
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
import serial
import serial.tools.list_ports
import time
import random
import math
ports = list(serial.tools.list_ports.comports())
print(ports)
for p in ports:
print(p[1])
if 'Arduino' in p[1]:
ser = serial.Serial(port=p[0])
else:
print('No Arduino Device was found connected to the computer')
cap = cv2.VideoCapture(1)
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')
lastpos = 0
currentpos = 0
lastdis = 0
currentdis = 0
lastx_d = 0
currentx_d = 0
shoot = 0
ct = 0
while True:
ct += 1
ret, img = cap.read()
center = [img.shape[0] / 2, img.shape[1] / 2]
faces = face_cascade.detectMultiScale(img, 1.3, 5)
tmp = 0
for x, y, w, h in faces:
tmp += 1
if tmp > 1:
print('too many faces')
else:
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_color = img[y:y + h, x:x + w]
x_d = x + w / 2 - 325 - 73
dis = -0.88 * w + 220
angle = x_d
currentpos = angle
currentdis = dis
currentx_d = x_d
if ct == 1:
lastpos = currentpos
lastdis = currentdis
lastx_d = currentx_d
print(str(int(angle)).encode())
if angle < 0:
ser.write(str(int(angle)).encode())
else:
ser.write(('+' + str(int(angle))).encode())
time.sleep(1)
if lastpos - currentpos < 10 and abs(angle) < 15:
shoot += 1
if shoot > 1:
time.sleep(2)
ser.write(str(10000).encode())
time.sleep(2)
shoot = 0
lastpos = currentpos
lastdis = currentdis
lastx_d = currentx_d
cv2.imshow('img', img)
if cv2.waitKey(1) & 255 == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import numpy as np
import cv2
import serial
import serial.tools.list_ports
import time
import random
import math
#import mcpi.minecraft as minecraft
#import mcpi.block as block
#from house import House
#Arduino Serials
ports = list(serial.tools.list_ports.comports())
print(ports)
for p in ports:
    print(p[1])
    if "Arduino" in p[1]:
        ser = serial.Serial(port=p[0])
    else:
        # Note: `ser` is never defined if no Arduino port is present.
        print("No Arduino Device was found connected to the computer")
#time.sleep(2)
#face detection
cap =cv2.VideoCapture(1)
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')
lastpos=0
currentpos=0
lastdis=0
currentdis=0
lastx_d=0
currentx_d=0
shoot=0
#MC
#mc=minecraft.Minecraft.create()
#pos=mc.player.getTilePos()
#pos0=[]
#pos0.append(pos.x)
#pos0.append(pos.y)
#pos0.append(pos.z)
#des=House([pos.x+20,pos.y,pos.z],mc,block.GOLD_BLOCK.id,block.GLASS.id)
#des.buildall()
ct=0
while(True):
ct+=1
    # Have we reached the destination yet?
#if(des.isInsideHouse()):
#mc.postToChat("You win")
#break
    # Face detection: it drives the catapult tracking and also steers the MC player toward the Destination
ret,img=cap.read()
center=[img.shape[0]/2,img.shape[1]/2]
faces = face_cascade.detectMultiScale(img, 1.3, 5)
tmp=0
for(x,y,w,h) in faces:
tmp+=1
if(tmp>1):
print("too many faces")
else:
for (x,y,w,h) in faces:
img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_color = img[y:y+h, x:x+w]
x_d=x+w/2-325-73
dis=(-0.88*w+220)
angle=x_d#math.atan(x_d/dis)/3.1415926535897*180
currentpos=angle
currentdis=dis
currentx_d=x_d
if(ct==1):
lastpos=currentpos
lastdis=currentdis
lastx_d=currentx_d
#pos=mc.player.getTilePos()
#mc.player.setTilePos([pos.x+(currentx_d-lastx_d)/5,pos.y,pos.z+(currentdis-lastdis)/5])
#print(x_d)
#print(angle)
#ser.write
print(str(int(angle)).encode())
#ser.write
if(angle<0):
ser.write(str(int(angle)).encode())
else:
ser.write(("+"+str(int(angle))).encode())
time.sleep(1)
if((lastpos-currentpos)<10 and abs(angle)<15):
shoot+=1
if(shoot>1):
time.sleep(2)
#mc.player.setTilePos([0,-1000,0])
ser.write(str(10000).encode())
time.sleep(2)
shoot=0
lastpos=currentpos
lastdis=currentdis
lastx_d=currentx_d
cv2.imshow('img',img)
if cv2.waitKey(1)& 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
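# The Arduino-side protocol is only implicit in the ser.write calls above:
# each frame sends a sign-prefixed integer angle, and the literal string
# "10000" is the fire trigger once the target has held still. A minimal
# sketch of that encoding; encode_command is a hypothetical helper name,
# not part of the original script.
def encode_command(angle=None, fire=False):
    if fire:
        return str(10000).encode()  # fire trigger, as sent in the loop above
    # Sign-prefixed integer angles (b"-12", b"+7", b"+0"), matching the
    # str(int(angle)) / "+" + str(int(angle)) branches above.
    return f"{int(angle):+d}".encode()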
|
flexible
|
{
"blob_id": "7c80c98e32f386362003ac3cd729fa9b279b8e8e",
"index": 7316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(ports)\nfor p in ports:\n print(p[1])\n if 'Arduino' in p[1]:\n ser = serial.Serial(port=p[0])\n else:\n print('No Arduino Device was found connected to the computer')\n<mask token>\nwhile True:\n ct += 1\n ret, img = cap.read()\n center = [img.shape[0] / 2, img.shape[1] / 2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp = 0\n for x, y, w, h in faces:\n tmp += 1\n if tmp > 1:\n print('too many faces')\n else:\n for x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_color = img[y:y + h, x:x + w]\n x_d = x + w / 2 - 325 - 73\n dis = -0.88 * w + 220\n angle = x_d\n currentpos = angle\n currentdis = dis\n currentx_d = x_d\n if ct == 1:\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n print(str(int(angle)).encode())\n if angle < 0:\n ser.write(str(int(angle)).encode())\n else:\n ser.write(('+' + str(int(angle))).encode())\n time.sleep(1)\n if lastpos - currentpos < 10 and abs(angle) < 15:\n shoot += 1\n if shoot > 1:\n time.sleep(2)\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot = 0\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n cv2.imshow('img', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nports = list(serial.tools.list_ports.comports())\nprint(ports)\nfor p in ports:\n print(p[1])\n if 'Arduino' in p[1]:\n ser = serial.Serial(port=p[0])\n else:\n print('No Arduino Device was found connected to the computer')\ncap = cv2.VideoCapture(1)\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')\nlastpos = 0\ncurrentpos = 0\nlastdis = 0\ncurrentdis = 0\nlastx_d = 0\ncurrentx_d = 0\nshoot = 0\nct = 0\nwhile True:\n ct += 1\n ret, img = cap.read()\n center = [img.shape[0] / 2, img.shape[1] / 2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp = 0\n for x, y, w, h in faces:\n tmp += 1\n if tmp > 1:\n print('too many faces')\n else:\n for x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_color = img[y:y + h, x:x + w]\n x_d = x + w / 2 - 325 - 73\n dis = -0.88 * w + 220\n angle = x_d\n currentpos = angle\n currentdis = dis\n currentx_d = x_d\n if ct == 1:\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n print(str(int(angle)).encode())\n if angle < 0:\n ser.write(str(int(angle)).encode())\n else:\n ser.write(('+' + str(int(angle))).encode())\n time.sleep(1)\n if lastpos - currentpos < 10 and abs(angle) < 15:\n shoot += 1\n if shoot > 1:\n time.sleep(2)\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot = 0\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n cv2.imshow('img', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nimport serial\nimport serial.tools.list_ports\nimport time\nimport random\nimport math\nports = list(serial.tools.list_ports.comports())\nprint(ports)\nfor p in ports:\n print(p[1])\n if 'Arduino' in p[1]:\n ser = serial.Serial(port=p[0])\n else:\n print('No Arduino Device was found connected to the computer')\ncap = cv2.VideoCapture(1)\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')\nlastpos = 0\ncurrentpos = 0\nlastdis = 0\ncurrentdis = 0\nlastx_d = 0\ncurrentx_d = 0\nshoot = 0\nct = 0\nwhile True:\n ct += 1\n ret, img = cap.read()\n center = [img.shape[0] / 2, img.shape[1] / 2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp = 0\n for x, y, w, h in faces:\n tmp += 1\n if tmp > 1:\n print('too many faces')\n else:\n for x, y, w, h in faces:\n img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_color = img[y:y + h, x:x + w]\n x_d = x + w / 2 - 325 - 73\n dis = -0.88 * w + 220\n angle = x_d\n currentpos = angle\n currentdis = dis\n currentx_d = x_d\n if ct == 1:\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n print(str(int(angle)).encode())\n if angle < 0:\n ser.write(str(int(angle)).encode())\n else:\n ser.write(('+' + str(int(angle))).encode())\n time.sleep(1)\n if lastpos - currentpos < 10 and abs(angle) < 15:\n shoot += 1\n if shoot > 1:\n time.sleep(2)\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot = 0\n lastpos = currentpos\n lastdis = currentdis\n lastx_d = currentx_d\n cv2.imshow('img', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\nimport cv2\nimport serial\nimport serial.tools.list_ports\nimport time\nimport random\nimport math\n#import mcpi.minecraft as minecraft\n#import mcpi.block as block\n#from house import House\n\n\n\n#Arduino Serials\nports = list(serial.tools.list_ports.comports())\nprint (ports)\nfor p in ports:\n print (p[1])\n if \"Arduino\" in p[1]:\n\t ser=serial.Serial(port=p[0])\n else :\n\t print (\"No Arduino Device was found connected to the computer\")\n#time.sleep(2)\n#face detection\t \ncap =cv2.VideoCapture(1)\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml')\n\nlastpos=0\ncurrentpos=0\nlastdis=0\ncurrentdis=0\nlastx_d=0\ncurrentx_d=0\nshoot=0\n#MC\n#mc=minecraft.Minecraft.create()\n#pos=mc.player.getTilePos()\n#pos0=[]\n#pos0.append(pos.x)\n#pos0.append(pos.y)\n#pos0.append(pos.z)\n#des=House([pos.x+20,pos.y,pos.z],mc,block.GOLD_BLOCK.id,block.GLASS.id)\n#des.buildall()\n\nct=0\nwhile(True):\n ct+=1\n #到达目的地了吗\n #if(des.isInsideHouse()):\n #mc.postToChat(\"You win\")\n #break\n #人脸识别,一方面投石机追踪,一方面控制MC里面人到Destinatioin\n ret,img=cap.read()\n center=[img.shape[0]/2,img.shape[1]/2]\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n tmp=0\n for(x,y,w,h) in faces:\n tmp+=1\n if(tmp>1):\n print(\"too many faces\")\n else:\n for (x,y,w,h) in faces:\n img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_color = img[y:y+h, x:x+w]\n \n x_d=x+w/2-325-73\n dis=(-0.88*w+220)\n angle=x_d#math.atan(x_d/dis)/3.1415926535897*180\n currentpos=angle\n currentdis=dis\n currentx_d=x_d\n if(ct==1):\n lastpos=currentpos\n lastdis=currentdis\n lastx_d=currentx_d\n #pos=mc.player.getTilePos()\n #mc.player.setTilePos([pos.x+(currentx_d-lastx_d)/5,pos.y,pos.z+(currentdis-lastdis)/5])\n #print(x_d)\n #print(angle)\n #ser.write\n print(str(int(angle)).encode())\n #ser.write\n if(angle<0):\n ser.write(str(int(angle)).encode())\n else:\n ser.write((\"+\"+str(int(angle))).encode())\n time.sleep(1)\n if((lastpos-currentpos)<10 and abs(angle)<15):\n shoot+=1\n if(shoot>1):\n time.sleep(2)\n #mc.player.setTilePos([0,-1000,0])\n ser.write(str(10000).encode())\n time.sleep(2)\n shoot=0\n lastpos=currentpos\n lastdis=currentdis\n lastx_d=currentx_d\n cv2.imshow('img',img)\n if cv2.waitKey(1)& 0xFF==ord('q'):\n break\n \n\ncap.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for img in img_list:
print(img['src'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'http://www.pythonscraping.com/pages/page3.html'
html = urlopen(url)
html_data = BeautifulSoup(html.read(), 'lxml')
img_list = html_data.find_all('img', {'src': re.compile('\\.\\./img*\\.jpg')})
for img in img_list:
print(img['src'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
url = 'http://www.pythonscraping.com/pages/page3.html'
html = urlopen(url)
html_data = BeautifulSoup(html.read(), 'lxml')
img_list = html_data.find_all('img', {'src': re.compile('\\.\\./img*\\.jpg')})
for img in img_list:
print(img['src'])
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 09:10:03 2018
@author: User
"""
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
url = "http://www.pythonscraping.com/pages/page3.html"
html = urlopen(url)
html_data = BeautifulSoup(html.read(), "lxml")
img_list = html_data.find_all("img", {"src": re.compile("\.\./img*\.jpg")})
for img in img_list:
print(img["src"])
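# One caveat worth flagging: in a regular expression, img* means "im"
# followed by zero or more "g" characters, not "anything beginning with img".
# If the page's image paths include subdirectories (e.g. ../img/gifts/img1.jpg),
# the pattern above matches nothing. A hypothetical corrected pattern,
# assuming the intent was "paths starting with ../img and ending in .jpg":
img_list = html_data.find_all("img", {"src": re.compile(r"\.\./img.*\.jpg")})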
|
flexible
|
{
"blob_id": "00609c4972269c36bbfcf5bec2a8648f812b6092",
"index": 9422,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor img in img_list:\n print(img['src'])\n",
"step-3": "<mask token>\nurl = 'http://www.pythonscraping.com/pages/page3.html'\nhtml = urlopen(url)\nhtml_data = BeautifulSoup(html.read(), 'lxml')\nimg_list = html_data.find_all('img', {'src': re.compile('\\\\.\\\\./img*\\\\.jpg')})\nfor img in img_list:\n print(img['src'])\n",
"step-4": "<mask token>\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nimport re\nurl = 'http://www.pythonscraping.com/pages/page3.html'\nhtml = urlopen(url)\nhtml_data = BeautifulSoup(html.read(), 'lxml')\nimg_list = html_data.find_all('img', {'src': re.compile('\\\\.\\\\./img*\\\\.jpg')})\nfor img in img_list:\n print(img['src'])\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 5 09:10:03 2018\n\n@author: User\n\"\"\"\n\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nimport re\n\nurl = \"http://www.pythonscraping.com/pages/page3.html\"\nhtml = urlopen(url)\nhtml_data = BeautifulSoup(html.read(), \"lxml\")\nimg_list = html_data.find_all(\"img\", {\"src\": re.compile(\"\\.\\./img*\\.jpg\")})\n\nfor img in img_list:\n print(img[\"src\"])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""Utilities for reading BEL Script."""
import time
from typing import Iterable, Mapping, Optional, Set
from .constants import (
ANNOTATION_PATTERN_FMT, ANNOTATION_URL_FMT, NAMESPACE_PATTERN_FMT, NAMESPACE_URL_FMT, format_annotation_list,
)
__all__ = [
'make_knowledge_header',
]
def make_knowledge_header(name: str,
version: Optional[str] = None,
description: Optional[str] = None,
authors: Optional[str] = None,
contact: Optional[str] = None,
copyright: Optional[str] = None,
licenses: Optional[str] = None,
disclaimer: Optional[str] = None,
namespace_url: Optional[Mapping[str, str]] = None,
namespace_patterns: Optional[Mapping[str, str]] = None,
annotation_url: Optional[Mapping[str, str]] = None,
annotation_patterns: Optional[Mapping[str, str]] = None,
annotation_list: Optional[Mapping[str, Set[str]]] = None,
) -> Iterable[str]:
"""Iterate over lines for the header of a BEL document, with standard document metadata and definitions.
:param name: The unique name for this BEL document
:param version: The version. Defaults to current date in format ``YYYYMMDD``.
:param description: A description of the contents of this document
:param authors: The authors of this document
:param contact: The email address of the maintainer
:param copyright: Copyright information about this document
:param licenses: The license applied to this document
:param disclaimer: The disclaimer for this document
:param namespace_url: an optional dictionary of {str name: str URL} of namespaces
:param namespace_patterns: An optional dictionary of {str name: str regex} namespaces
:param annotation_url: An optional dictionary of {str name: str URL} of annotations
:param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations
:param annotation_list: An optional dictionary of {str name: set of names} of list annotations
"""
yield from make_document_metadata(
name=name,
contact=contact,
description=description,
authors=authors,
version=version,
copyright=copyright,
licenses=licenses,
disclaimer=disclaimer,
)
yield from make_document_namespaces(
namespace_url=namespace_url,
namespace_patterns=namespace_patterns,
)
yield from make_document_annotations(
annotation_url=annotation_url,
annotation_patterns=annotation_patterns,
annotation_list=annotation_list,
)
yield '#' * 80
yield '#| Statements'
yield '#' * 80
def make_document_metadata(name: str,
version: Optional[str] = None,
contact: Optional[str] = None,
description: Optional[str] = None,
authors: Optional[str] = None,
copyright: Optional[str] = None,
licenses: Optional[str] = None,
disclaimer: Optional[str] = None,
) -> Iterable[str]:
"""Iterate over the lines for the document metadata section of a BEL document.
:param name: The unique name for this BEL document
:param version: The version. Defaults to the current date in ``YYYYMMDD`` format.
:param description: A description of the contents of this document
:param authors: The authors of this document
:param contact: The email address of the maintainer
:param copyright: Copyright information about this document
:param licenses: The license applied to this document
:param disclaimer: The disclaimer for this document
"""
yield '#' * 80
yield '#| Metadata'
yield '#' * 80 + '\n'
yield 'SET DOCUMENT Name = "{}"'.format(name)
yield 'SET DOCUMENT Version = "{}"'.format(version or time.strftime('%Y%m%d'))
if description:
yield 'SET DOCUMENT Description = "{}"'.format(description.replace('\n', ''))
if authors:
yield 'SET DOCUMENT Authors = "{}"'.format(authors)
if contact:
yield 'SET DOCUMENT ContactInfo = "{}"'.format(contact)
if licenses:
yield 'SET DOCUMENT Licenses = "{}"'.format(licenses)
if copyright:
yield 'SET DOCUMENT Copyright = "{}"'.format(copyright)
if disclaimer:
yield 'SET DOCUMENT Disclaimer = "{}"'.format(disclaimer)
yield ''
def make_document_namespaces(namespace_url: Optional[Mapping[str, str]] = None,
namespace_patterns: Optional[Mapping[str, str]] = None,
) -> Iterable[str]:
"""Iterate over lines for the namespace definitions.
:param namespace_url: dictionary of {str name: str URL} of namespaces
:param namespace_patterns: A dictionary of {str name: str regex}
"""
yield '#' * 80
yield '#| Namespaces'
yield '#' * 80
if namespace_url:
yield '\n# Enumerated Namespaces'
yield '# ---------------------'
for name, url in sorted(namespace_url.items()):
yield NAMESPACE_URL_FMT.format(name, url)
if namespace_patterns:
yield '\n# Regular Expression Namespaces'
yield '# -----------------------------'
for name, pattern in sorted(namespace_patterns.items()):
yield NAMESPACE_PATTERN_FMT.format(name, pattern)
yield ''
def make_document_annotations(annotation_url: Optional[Mapping[str, str]] = None,
annotation_patterns: Optional[Mapping[str, str]] = None,
annotation_list: Optional[Mapping[str, Set[str]]] = None,
) -> Iterable[str]:
"""Iterate over lines for the annotation definitions.
:param annotation_url: A dictionary of {str name: str URL} of annotations
:param annotation_patterns: A dictionary of {str name: str regex}
:param annotation_list: A dictionary of {str name: set of name str}
"""
if annotation_url or annotation_patterns or annotation_list:
yield '#' * 80
yield '#| Annotations'
yield '#' * 80
if annotation_url:
yield '\n# Enumerated Annotations'
yield '# ----------------------'
for name, url in sorted(annotation_url.items()):
yield ANNOTATION_URL_FMT.format(name, url)
if annotation_patterns:
yield '\n# Regular Expression Annotations'
yield '# ------------------------------'
for name, pattern in sorted(annotation_patterns.items()):
yield ANNOTATION_PATTERN_FMT.format(name, pattern)
if annotation_list:
yield '\n# Locally Defined Annotations'
yield '# ---------------------------'
for annotation, values in sorted(annotation_list.items()):
yield format_annotation_list(annotation, values)
yield ''
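# A short usage sketch for the generators above; the document name, namespace
# URL, and annotation values are illustrative placeholders, not part of the
# module.
if __name__ == '__main__':
    header = make_knowledge_header(
        name='Example BEL Document',
        authors='Jane Doe',
        contact='jane.doe@example.com',
        namespace_url={'HGNC': 'https://example.com/hgnc.belns'},
        annotation_list={'Confidence': {'High', 'Medium', 'Low'}},
    )
    print('\n'.join(header))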
|
normal
|
{
"blob_id": "46b8d0ba58d4bf17021b05fc03bd480802f65adf",
"index": 6132,
"step-1": "<mask token>\n\n\ndef make_knowledge_header(name: str, version: Optional[str]=None,\n description: Optional[str]=None, authors: Optional[str]=None, contact:\n Optional[str]=None, copyright: Optional[str]=None, licenses: Optional[\n str]=None, disclaimer: Optional[str]=None, namespace_url: Optional[\n Mapping[str, str]]=None, namespace_patterns: Optional[Mapping[str, str]\n ]=None, annotation_url: Optional[Mapping[str, str]]=None,\n annotation_patterns: Optional[Mapping[str, str]]=None, annotation_list:\n Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the header of a BEL document, with standard document metadata and definitions.\n\n :param name: The unique name for this BEL document\n :param version: The version. Defaults to current date in format ``YYYYMMDD``.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n :param namespace_url: an optional dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces\n :param annotation_url: An optional dictionary of {str name: str URL} of annotations\n :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations\n :param annotation_list: An optional dictionary of {str name: set of names} of list annotations\n \"\"\"\n yield from make_document_metadata(name=name, contact=contact,\n description=description, authors=authors, version=version,\n copyright=copyright, licenses=licenses, disclaimer=disclaimer)\n yield from make_document_namespaces(namespace_url=namespace_url,\n namespace_patterns=namespace_patterns)\n yield from make_document_annotations(annotation_url=annotation_url,\n annotation_patterns=annotation_patterns, annotation_list=\n annotation_list)\n yield '#' * 80\n yield '#| Statements'\n yield '#' * 80\n\n\ndef make_document_metadata(name: str, version: Optional[str]=None, contact:\n Optional[str]=None, description: Optional[str]=None, authors: Optional[\n str]=None, copyright: Optional[str]=None, licenses: Optional[str]=None,\n disclaimer: Optional[str]=None) ->Iterable[str]:\n \"\"\"Iterate over the lines for the document metadata section of a BEL document.\n\n :param name: The unique name for this BEL document\n :param version: The version. 
Defaults to the current date in ``YYYYMMDD`` format.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n \"\"\"\n yield '#' * 80\n yield '#| Metadata'\n yield '#' * 80 + '\\n'\n yield 'SET DOCUMENT Name = \"{}\"'.format(name)\n yield 'SET DOCUMENT Version = \"{}\"'.format(version or time.strftime(\n '%Y%m%d'))\n if description:\n yield 'SET DOCUMENT Description = \"{}\"'.format(description.replace(\n '\\n', ''))\n if authors:\n yield 'SET DOCUMENT Authors = \"{}\"'.format(authors)\n if contact:\n yield 'SET DOCUMENT ContactInfo = \"{}\"'.format(contact)\n if licenses:\n yield 'SET DOCUMENT Licenses = \"{}\"'.format(licenses)\n if copyright:\n yield 'SET DOCUMENT Copyright = \"{}\"'.format(copyright)\n if disclaimer:\n yield 'SET DOCUMENT Disclaimer = \"{}\"'.format(disclaimer)\n yield ''\n\n\ndef make_document_namespaces(namespace_url: Optional[Mapping[str, str]]=\n None, namespace_patterns: Optional[Mapping[str, str]]=None) ->Iterable[str\n ]:\n \"\"\"Iterate over lines for the namespace definitions.\n\n :param namespace_url: dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: A dictionary of {str name: str regex}\n \"\"\"\n yield '#' * 80\n yield '#| Namespaces'\n yield '#' * 80\n if namespace_url:\n yield '\\n# Enumerated Namespaces'\n yield '# ---------------------'\n for name, url in sorted(namespace_url.items()):\n yield NAMESPACE_URL_FMT.format(name, url)\n if namespace_patterns:\n yield '\\n# Regular Expression Namespaces'\n yield '# -----------------------------'\n for name, pattern in sorted(namespace_patterns.items()):\n yield NAMESPACE_PATTERN_FMT.format(name, pattern)\n yield ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_knowledge_header(name: str, version: Optional[str]=None,\n description: Optional[str]=None, authors: Optional[str]=None, contact:\n Optional[str]=None, copyright: Optional[str]=None, licenses: Optional[\n str]=None, disclaimer: Optional[str]=None, namespace_url: Optional[\n Mapping[str, str]]=None, namespace_patterns: Optional[Mapping[str, str]\n ]=None, annotation_url: Optional[Mapping[str, str]]=None,\n annotation_patterns: Optional[Mapping[str, str]]=None, annotation_list:\n Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the header of a BEL document, with standard document metadata and definitions.\n\n :param name: The unique name for this BEL document\n :param version: The version. Defaults to current date in format ``YYYYMMDD``.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n :param namespace_url: an optional dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces\n :param annotation_url: An optional dictionary of {str name: str URL} of annotations\n :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations\n :param annotation_list: An optional dictionary of {str name: set of names} of list annotations\n \"\"\"\n yield from make_document_metadata(name=name, contact=contact,\n description=description, authors=authors, version=version,\n copyright=copyright, licenses=licenses, disclaimer=disclaimer)\n yield from make_document_namespaces(namespace_url=namespace_url,\n namespace_patterns=namespace_patterns)\n yield from make_document_annotations(annotation_url=annotation_url,\n annotation_patterns=annotation_patterns, annotation_list=\n annotation_list)\n yield '#' * 80\n yield '#| Statements'\n yield '#' * 80\n\n\ndef make_document_metadata(name: str, version: Optional[str]=None, contact:\n Optional[str]=None, description: Optional[str]=None, authors: Optional[\n str]=None, copyright: Optional[str]=None, licenses: Optional[str]=None,\n disclaimer: Optional[str]=None) ->Iterable[str]:\n \"\"\"Iterate over the lines for the document metadata section of a BEL document.\n\n :param name: The unique name for this BEL document\n :param version: The version. 
Defaults to the current date in ``YYYYMMDD`` format.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n \"\"\"\n yield '#' * 80\n yield '#| Metadata'\n yield '#' * 80 + '\\n'\n yield 'SET DOCUMENT Name = \"{}\"'.format(name)\n yield 'SET DOCUMENT Version = \"{}\"'.format(version or time.strftime(\n '%Y%m%d'))\n if description:\n yield 'SET DOCUMENT Description = \"{}\"'.format(description.replace(\n '\\n', ''))\n if authors:\n yield 'SET DOCUMENT Authors = \"{}\"'.format(authors)\n if contact:\n yield 'SET DOCUMENT ContactInfo = \"{}\"'.format(contact)\n if licenses:\n yield 'SET DOCUMENT Licenses = \"{}\"'.format(licenses)\n if copyright:\n yield 'SET DOCUMENT Copyright = \"{}\"'.format(copyright)\n if disclaimer:\n yield 'SET DOCUMENT Disclaimer = \"{}\"'.format(disclaimer)\n yield ''\n\n\ndef make_document_namespaces(namespace_url: Optional[Mapping[str, str]]=\n None, namespace_patterns: Optional[Mapping[str, str]]=None) ->Iterable[str\n ]:\n \"\"\"Iterate over lines for the namespace definitions.\n\n :param namespace_url: dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: A dictionary of {str name: str regex}\n \"\"\"\n yield '#' * 80\n yield '#| Namespaces'\n yield '#' * 80\n if namespace_url:\n yield '\\n# Enumerated Namespaces'\n yield '# ---------------------'\n for name, url in sorted(namespace_url.items()):\n yield NAMESPACE_URL_FMT.format(name, url)\n if namespace_patterns:\n yield '\\n# Regular Expression Namespaces'\n yield '# -----------------------------'\n for name, pattern in sorted(namespace_patterns.items()):\n yield NAMESPACE_PATTERN_FMT.format(name, pattern)\n yield ''\n\n\ndef make_document_annotations(annotation_url: Optional[Mapping[str, str]]=\n None, annotation_patterns: Optional[Mapping[str, str]]=None,\n annotation_list: Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the annotation definitions.\n\n :param annotation_url: A dictionary of {str name: str URL} of annotations\n :param annotation_patterns: A dictionary of {str name: str regex}\n :param annotation_list: A dictionary of {str name: set of name str}\n \"\"\"\n if annotation_url or annotation_patterns or annotation_list:\n yield '#' * 80\n yield '#| Annotations'\n yield '#' * 80\n if annotation_url:\n yield '\\n# Enumerated Annotations'\n yield '# ----------------------'\n for name, url in sorted(annotation_url.items()):\n yield ANNOTATION_URL_FMT.format(name, url)\n if annotation_patterns:\n yield '\\n# Regular Expression Annotations'\n yield '# ------------------------------'\n for name, pattern in sorted(annotation_patterns.items()):\n yield ANNOTATION_PATTERN_FMT.format(name, pattern)\n if annotation_list:\n yield '\\n# Locally Defined Annotations'\n yield '# ---------------------------'\n for annotation, values in sorted(annotation_list.items()):\n yield format_annotation_list(annotation, values)\n yield ''\n",
"step-3": "<mask token>\n__all__ = ['make_knowledge_header']\n\n\ndef make_knowledge_header(name: str, version: Optional[str]=None,\n description: Optional[str]=None, authors: Optional[str]=None, contact:\n Optional[str]=None, copyright: Optional[str]=None, licenses: Optional[\n str]=None, disclaimer: Optional[str]=None, namespace_url: Optional[\n Mapping[str, str]]=None, namespace_patterns: Optional[Mapping[str, str]\n ]=None, annotation_url: Optional[Mapping[str, str]]=None,\n annotation_patterns: Optional[Mapping[str, str]]=None, annotation_list:\n Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the header of a BEL document, with standard document metadata and definitions.\n\n :param name: The unique name for this BEL document\n :param version: The version. Defaults to current date in format ``YYYYMMDD``.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n :param namespace_url: an optional dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces\n :param annotation_url: An optional dictionary of {str name: str URL} of annotations\n :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations\n :param annotation_list: An optional dictionary of {str name: set of names} of list annotations\n \"\"\"\n yield from make_document_metadata(name=name, contact=contact,\n description=description, authors=authors, version=version,\n copyright=copyright, licenses=licenses, disclaimer=disclaimer)\n yield from make_document_namespaces(namespace_url=namespace_url,\n namespace_patterns=namespace_patterns)\n yield from make_document_annotations(annotation_url=annotation_url,\n annotation_patterns=annotation_patterns, annotation_list=\n annotation_list)\n yield '#' * 80\n yield '#| Statements'\n yield '#' * 80\n\n\ndef make_document_metadata(name: str, version: Optional[str]=None, contact:\n Optional[str]=None, description: Optional[str]=None, authors: Optional[\n str]=None, copyright: Optional[str]=None, licenses: Optional[str]=None,\n disclaimer: Optional[str]=None) ->Iterable[str]:\n \"\"\"Iterate over the lines for the document metadata section of a BEL document.\n\n :param name: The unique name for this BEL document\n :param version: The version. 
Defaults to the current date in ``YYYYMMDD`` format.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n \"\"\"\n yield '#' * 80\n yield '#| Metadata'\n yield '#' * 80 + '\\n'\n yield 'SET DOCUMENT Name = \"{}\"'.format(name)\n yield 'SET DOCUMENT Version = \"{}\"'.format(version or time.strftime(\n '%Y%m%d'))\n if description:\n yield 'SET DOCUMENT Description = \"{}\"'.format(description.replace(\n '\\n', ''))\n if authors:\n yield 'SET DOCUMENT Authors = \"{}\"'.format(authors)\n if contact:\n yield 'SET DOCUMENT ContactInfo = \"{}\"'.format(contact)\n if licenses:\n yield 'SET DOCUMENT Licenses = \"{}\"'.format(licenses)\n if copyright:\n yield 'SET DOCUMENT Copyright = \"{}\"'.format(copyright)\n if disclaimer:\n yield 'SET DOCUMENT Disclaimer = \"{}\"'.format(disclaimer)\n yield ''\n\n\ndef make_document_namespaces(namespace_url: Optional[Mapping[str, str]]=\n None, namespace_patterns: Optional[Mapping[str, str]]=None) ->Iterable[str\n ]:\n \"\"\"Iterate over lines for the namespace definitions.\n\n :param namespace_url: dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: A dictionary of {str name: str regex}\n \"\"\"\n yield '#' * 80\n yield '#| Namespaces'\n yield '#' * 80\n if namespace_url:\n yield '\\n# Enumerated Namespaces'\n yield '# ---------------------'\n for name, url in sorted(namespace_url.items()):\n yield NAMESPACE_URL_FMT.format(name, url)\n if namespace_patterns:\n yield '\\n# Regular Expression Namespaces'\n yield '# -----------------------------'\n for name, pattern in sorted(namespace_patterns.items()):\n yield NAMESPACE_PATTERN_FMT.format(name, pattern)\n yield ''\n\n\ndef make_document_annotations(annotation_url: Optional[Mapping[str, str]]=\n None, annotation_patterns: Optional[Mapping[str, str]]=None,\n annotation_list: Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the annotation definitions.\n\n :param annotation_url: A dictionary of {str name: str URL} of annotations\n :param annotation_patterns: A dictionary of {str name: str regex}\n :param annotation_list: A dictionary of {str name: set of name str}\n \"\"\"\n if annotation_url or annotation_patterns or annotation_list:\n yield '#' * 80\n yield '#| Annotations'\n yield '#' * 80\n if annotation_url:\n yield '\\n# Enumerated Annotations'\n yield '# ----------------------'\n for name, url in sorted(annotation_url.items()):\n yield ANNOTATION_URL_FMT.format(name, url)\n if annotation_patterns:\n yield '\\n# Regular Expression Annotations'\n yield '# ------------------------------'\n for name, pattern in sorted(annotation_patterns.items()):\n yield ANNOTATION_PATTERN_FMT.format(name, pattern)\n if annotation_list:\n yield '\\n# Locally Defined Annotations'\n yield '# ---------------------------'\n for annotation, values in sorted(annotation_list.items()):\n yield format_annotation_list(annotation, values)\n yield ''\n",
"step-4": "<mask token>\nimport time\nfrom typing import Iterable, Mapping, Optional, Set\nfrom .constants import ANNOTATION_PATTERN_FMT, ANNOTATION_URL_FMT, NAMESPACE_PATTERN_FMT, NAMESPACE_URL_FMT, format_annotation_list\n__all__ = ['make_knowledge_header']\n\n\ndef make_knowledge_header(name: str, version: Optional[str]=None,\n description: Optional[str]=None, authors: Optional[str]=None, contact:\n Optional[str]=None, copyright: Optional[str]=None, licenses: Optional[\n str]=None, disclaimer: Optional[str]=None, namespace_url: Optional[\n Mapping[str, str]]=None, namespace_patterns: Optional[Mapping[str, str]\n ]=None, annotation_url: Optional[Mapping[str, str]]=None,\n annotation_patterns: Optional[Mapping[str, str]]=None, annotation_list:\n Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the header of a BEL document, with standard document metadata and definitions.\n\n :param name: The unique name for this BEL document\n :param version: The version. Defaults to current date in format ``YYYYMMDD``.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n :param namespace_url: an optional dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces\n :param annotation_url: An optional dictionary of {str name: str URL} of annotations\n :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations\n :param annotation_list: An optional dictionary of {str name: set of names} of list annotations\n \"\"\"\n yield from make_document_metadata(name=name, contact=contact,\n description=description, authors=authors, version=version,\n copyright=copyright, licenses=licenses, disclaimer=disclaimer)\n yield from make_document_namespaces(namespace_url=namespace_url,\n namespace_patterns=namespace_patterns)\n yield from make_document_annotations(annotation_url=annotation_url,\n annotation_patterns=annotation_patterns, annotation_list=\n annotation_list)\n yield '#' * 80\n yield '#| Statements'\n yield '#' * 80\n\n\ndef make_document_metadata(name: str, version: Optional[str]=None, contact:\n Optional[str]=None, description: Optional[str]=None, authors: Optional[\n str]=None, copyright: Optional[str]=None, licenses: Optional[str]=None,\n disclaimer: Optional[str]=None) ->Iterable[str]:\n \"\"\"Iterate over the lines for the document metadata section of a BEL document.\n\n :param name: The unique name for this BEL document\n :param version: The version. 
Defaults to the current date in ``YYYYMMDD`` format.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n \"\"\"\n yield '#' * 80\n yield '#| Metadata'\n yield '#' * 80 + '\\n'\n yield 'SET DOCUMENT Name = \"{}\"'.format(name)\n yield 'SET DOCUMENT Version = \"{}\"'.format(version or time.strftime(\n '%Y%m%d'))\n if description:\n yield 'SET DOCUMENT Description = \"{}\"'.format(description.replace(\n '\\n', ''))\n if authors:\n yield 'SET DOCUMENT Authors = \"{}\"'.format(authors)\n if contact:\n yield 'SET DOCUMENT ContactInfo = \"{}\"'.format(contact)\n if licenses:\n yield 'SET DOCUMENT Licenses = \"{}\"'.format(licenses)\n if copyright:\n yield 'SET DOCUMENT Copyright = \"{}\"'.format(copyright)\n if disclaimer:\n yield 'SET DOCUMENT Disclaimer = \"{}\"'.format(disclaimer)\n yield ''\n\n\ndef make_document_namespaces(namespace_url: Optional[Mapping[str, str]]=\n None, namespace_patterns: Optional[Mapping[str, str]]=None) ->Iterable[str\n ]:\n \"\"\"Iterate over lines for the namespace definitions.\n\n :param namespace_url: dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: A dictionary of {str name: str regex}\n \"\"\"\n yield '#' * 80\n yield '#| Namespaces'\n yield '#' * 80\n if namespace_url:\n yield '\\n# Enumerated Namespaces'\n yield '# ---------------------'\n for name, url in sorted(namespace_url.items()):\n yield NAMESPACE_URL_FMT.format(name, url)\n if namespace_patterns:\n yield '\\n# Regular Expression Namespaces'\n yield '# -----------------------------'\n for name, pattern in sorted(namespace_patterns.items()):\n yield NAMESPACE_PATTERN_FMT.format(name, pattern)\n yield ''\n\n\ndef make_document_annotations(annotation_url: Optional[Mapping[str, str]]=\n None, annotation_patterns: Optional[Mapping[str, str]]=None,\n annotation_list: Optional[Mapping[str, Set[str]]]=None) ->Iterable[str]:\n \"\"\"Iterate over lines for the annotation definitions.\n\n :param annotation_url: A dictionary of {str name: str URL} of annotations\n :param annotation_patterns: A dictionary of {str name: str regex}\n :param annotation_list: A dictionary of {str name: set of name str}\n \"\"\"\n if annotation_url or annotation_patterns or annotation_list:\n yield '#' * 80\n yield '#| Annotations'\n yield '#' * 80\n if annotation_url:\n yield '\\n# Enumerated Annotations'\n yield '# ----------------------'\n for name, url in sorted(annotation_url.items()):\n yield ANNOTATION_URL_FMT.format(name, url)\n if annotation_patterns:\n yield '\\n# Regular Expression Annotations'\n yield '# ------------------------------'\n for name, pattern in sorted(annotation_patterns.items()):\n yield ANNOTATION_PATTERN_FMT.format(name, pattern)\n if annotation_list:\n yield '\\n# Locally Defined Annotations'\n yield '# ---------------------------'\n for annotation, values in sorted(annotation_list.items()):\n yield format_annotation_list(annotation, values)\n yield ''\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"Utilities for reading BEL Script.\"\"\"\n\nimport time\nfrom typing import Iterable, Mapping, Optional, Set\n\nfrom .constants import (\n ANNOTATION_PATTERN_FMT, ANNOTATION_URL_FMT, NAMESPACE_PATTERN_FMT, NAMESPACE_URL_FMT, format_annotation_list,\n)\n\n__all__ = [\n 'make_knowledge_header',\n]\n\n\ndef make_knowledge_header(name: str,\n version: Optional[str] = None,\n description: Optional[str] = None,\n authors: Optional[str] = None,\n contact: Optional[str] = None,\n copyright: Optional[str] = None,\n licenses: Optional[str] = None,\n disclaimer: Optional[str] = None,\n namespace_url: Optional[Mapping[str, str]] = None,\n namespace_patterns: Optional[Mapping[str, str]] = None,\n annotation_url: Optional[Mapping[str, str]] = None,\n annotation_patterns: Optional[Mapping[str, str]] = None,\n annotation_list: Optional[Mapping[str, Set[str]]] = None,\n ) -> Iterable[str]:\n \"\"\"Iterate over lines for the header of a BEL document, with standard document metadata and definitions.\n\n :param name: The unique name for this BEL document\n :param version: The version. Defaults to current date in format ``YYYYMMDD``.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n :param namespace_url: an optional dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces\n :param annotation_url: An optional dictionary of {str name: str URL} of annotations\n :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations\n :param annotation_list: An optional dictionary of {str name: set of names} of list annotations\n \"\"\"\n yield from make_document_metadata(\n name=name,\n contact=contact,\n description=description,\n authors=authors,\n version=version,\n copyright=copyright,\n licenses=licenses,\n disclaimer=disclaimer,\n )\n\n yield from make_document_namespaces(\n namespace_url=namespace_url,\n namespace_patterns=namespace_patterns,\n )\n\n yield from make_document_annotations(\n annotation_url=annotation_url,\n annotation_patterns=annotation_patterns,\n annotation_list=annotation_list,\n )\n\n yield '#' * 80\n yield '#| Statements'\n yield '#' * 80\n\n\ndef make_document_metadata(name: str,\n version: Optional[str] = None,\n contact: Optional[str] = None,\n description: Optional[str] = None,\n authors: Optional[str] = None,\n copyright: Optional[str] = None,\n licenses: Optional[str] = None,\n disclaimer: Optional[str] = None,\n ) -> Iterable[str]:\n \"\"\"Iterate over the lines for the document metadata section of a BEL document.\n\n :param name: The unique name for this BEL document\n :param version: The version. 
Defaults to the current date in ``YYYYMMDD`` format.\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param contact: The email address of the maintainer\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n \"\"\"\n yield '#' * 80\n yield '#| Metadata'\n yield '#' * 80 + '\\n'\n\n yield 'SET DOCUMENT Name = \"{}\"'.format(name)\n yield 'SET DOCUMENT Version = \"{}\"'.format(version or time.strftime('%Y%m%d'))\n\n if description:\n yield 'SET DOCUMENT Description = \"{}\"'.format(description.replace('\\n', ''))\n\n if authors:\n yield 'SET DOCUMENT Authors = \"{}\"'.format(authors)\n\n if contact:\n yield 'SET DOCUMENT ContactInfo = \"{}\"'.format(contact)\n\n if licenses:\n yield 'SET DOCUMENT Licenses = \"{}\"'.format(licenses)\n\n if copyright:\n yield 'SET DOCUMENT Copyright = \"{}\"'.format(copyright)\n\n if disclaimer:\n yield 'SET DOCUMENT Disclaimer = \"{}\"'.format(disclaimer)\n\n yield ''\n\n\ndef make_document_namespaces(namespace_url: Optional[Mapping[str, str]] = None,\n namespace_patterns: Optional[Mapping[str, str]] = None,\n ) -> Iterable[str]:\n \"\"\"Iterate over lines for the namespace definitions.\n\n :param namespace_url: dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: A dictionary of {str name: str regex}\n \"\"\"\n yield '#' * 80\n yield '#| Namespaces'\n yield '#' * 80\n\n if namespace_url:\n yield '\\n# Enumerated Namespaces'\n yield '# ---------------------'\n for name, url in sorted(namespace_url.items()):\n yield NAMESPACE_URL_FMT.format(name, url)\n\n if namespace_patterns:\n yield '\\n# Regular Expression Namespaces'\n yield '# -----------------------------'\n for name, pattern in sorted(namespace_patterns.items()):\n yield NAMESPACE_PATTERN_FMT.format(name, pattern)\n\n yield ''\n\n\ndef make_document_annotations(annotation_url: Optional[Mapping[str, str]] = None,\n annotation_patterns: Optional[Mapping[str, str]] = None,\n annotation_list: Optional[Mapping[str, Set[str]]] = None,\n ) -> Iterable[str]:\n \"\"\"Iterate over lines for the annotation definitions.\n\n :param annotation_url: A dictionary of {str name: str URL} of annotations\n :param annotation_patterns: A dictionary of {str name: str regex}\n :param annotation_list: A dictionary of {str name: set of name str}\n \"\"\"\n if annotation_url or annotation_patterns or annotation_list:\n yield '#' * 80\n yield '#| Annotations'\n yield '#' * 80\n\n if annotation_url:\n yield '\\n# Enumerated Annotations'\n yield '# ----------------------'\n for name, url in sorted(annotation_url.items()):\n yield ANNOTATION_URL_FMT.format(name, url)\n\n if annotation_patterns:\n yield '\\n# Regular Expression Annotations'\n yield '# ------------------------------'\n for name, pattern in sorted(annotation_patterns.items()):\n yield ANNOTATION_PATTERN_FMT.format(name, pattern)\n\n if annotation_list:\n yield '\\n# Locally Defined Annotations'\n yield '# ---------------------------'\n for annotation, values in sorted(annotation_list.items()):\n yield format_annotation_list(annotation, values)\n\n yield ''\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from util import *
# Evaluation rule for the continuant K: K(<empty>) = 1, K(x[:2]) = x[1],
# and K(x[:n]) = K(x[:n-1]) * x[n-1] + K(x[:n-2]) for larger n.
def K_step(x):
    if not x.shape:
        return S.One
    assert len(x.shape) == 1
    n = x.shape[0]
    if n == 2:
        return x[1]
    # For a symbolic length n, keep the base cases inside a Piecewise.
    return Piecewise((1, Equal(n, 1)),
                     (x[1], Equal(n, 2)),
                     (K(x[:n - 1]) * x[n - 1] + K(x[:n - 2]), True))
K = Function.K(integer=True, eval=K_step, shape=())
@apply
def apply(self):
assert self.is_K
x = self.arg
n = x.shape[0]
n -= 2
assert n > 0
return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])
@prove
def prove(Eq):
x = Symbol(integer=True, shape=(oo,))
n = Symbol(integer=True, positive=True)
Eq << apply(K(x[:n + 2]))
Eq << Eq[-1].this.lhs.defun()
if __name__ == '__main__':
run()
# created on 2021-08-18
|
flexible
|
{
"blob_id": "b00c07ee3cdba55800c9701b7b8b0e3c9079e9f8",
"index": 6272,
"step-1": "<mask token>\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\n<mask token>\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\n<mask token>\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n",
"step-3": "<mask token>\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\nK = Function.K(integer=True, eval=K_step, shape=())\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "from util import *\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\nK = Function.K(integer=True, eval=K_step, shape=())\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "from util import *\n\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)),\n (x[1], Equal(n, 2)),\n (K(x[:n - 1]) * x[n - 1] + K(x[:n - 2]), True))\n\n\nK = Function.K(integer=True, eval=K_step, shape=())\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n\n Eq << apply(K(x[:n + 2]))\n\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n# created on 2021-08-18\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
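The recurrence that K_step encodes is the standard continuant. As a sanity check independent of the util framework above, a plain-Python sketch with the same base cases reproduces both the definition and the identity proved in prove():

def continuant(xs):
    # K() = K(x0) = 1, K(x0, x1) = x1, then K_n = K_{n-1} * x_{n-1} + K_{n-2},
    # mirroring the Piecewise branches in K_step above.
    if len(xs) <= 1:
        return 1
    prev, cur = 1, xs[1]
    for k in range(2, len(xs)):
        prev, cur = cur, cur * xs[k] + prev
    return cur

xs = [2, 3, 4, 5, 6]
n = 3
# The identity from apply(): K(x[:n+2]) == K(x[:n]) + K(x[:n+1]) * x[n+1]
assert continuant(xs[:n + 2]) == continuant(xs[:n]) + continuant(xs[:n + 1]) * xs[n + 1]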
from golem import actions
from projects.golem_integration.pages import golem_steps
description = 'close_window_by_partial_title action'
def test(data):
actions.navigate(data.env.url + 'tabs/')
actions.send_keys('#title', 'lorem ipsum')
actions.click('#goButtonCustom')
actions.assert_amount_of_windows(2)
actions.close_window_by_partial_title('lorem')
golem_steps.assert_last_step_message(
"Close window by partial title 'lorem'")
actions.assert_amount_of_windows(1)
|
normal
|
{
"blob_id": "8fe45332ce09195beabb24c8cbb56868c564ded4",
"index": 2132,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test(data):\n actions.navigate(data.env.url + 'tabs/')\n actions.send_keys('#title', 'lorem ipsum')\n actions.click('#goButtonCustom')\n actions.assert_amount_of_windows(2)\n actions.close_window_by_partial_title('lorem')\n golem_steps.assert_last_step_message(\n \"Close window by partial title 'lorem'\")\n actions.assert_amount_of_windows(1)\n",
"step-3": "<mask token>\ndescription = 'close_window_by_partial_title action'\n\n\ndef test(data):\n actions.navigate(data.env.url + 'tabs/')\n actions.send_keys('#title', 'lorem ipsum')\n actions.click('#goButtonCustom')\n actions.assert_amount_of_windows(2)\n actions.close_window_by_partial_title('lorem')\n golem_steps.assert_last_step_message(\n \"Close window by partial title 'lorem'\")\n actions.assert_amount_of_windows(1)\n",
"step-4": "from golem import actions\nfrom projects.golem_integration.pages import golem_steps\ndescription = 'close_window_by_partial_title action'\n\n\ndef test(data):\n actions.navigate(data.env.url + 'tabs/')\n actions.send_keys('#title', 'lorem ipsum')\n actions.click('#goButtonCustom')\n actions.assert_amount_of_windows(2)\n actions.close_window_by_partial_title('lorem')\n golem_steps.assert_last_step_message(\n \"Close window by partial title 'lorem'\")\n actions.assert_amount_of_windows(1)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
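A companion case can be written with the same actions; the following is a sketch, reusing only calls that appear in the test above and the same assumed fixtures (data.env.url and the tabs/ page):

from golem import actions

description = 'close_window_by_partial_title action, alternate partial match (sketch)'

def test(data):
    actions.navigate(data.env.url + 'tabs/')
    actions.send_keys('#title', 'lorem ipsum')
    actions.click('#goButtonCustom')
    actions.assert_amount_of_windows(2)
    # Any substring of the new window's title should select it.
    actions.close_window_by_partial_title('ipsum')
    actions.assert_amount_of_windows(1)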
import random
OPTIONS = ['rock', 'paper', 'scissors']
def get_human_choice():
print('(1) Rock\n(2) Paper\n(3) Scissors')
return OPTIONS[int(input('Enter the number of your choice: ')) - 1]
def get_computer_choice():
return random.choice(OPTIONS)
def print_choices(human_choice, computer_choice):
print(f'You chose {human_choice.title()}')
print(f'The computer chose {computer_choice.title()}')
def eval_game_result(human_choice, computer_choice):
if human_choice == computer_choice:
return 'draw'
elif human_choice == 'rock':
return 'human' if computer_choice == 'scissors' else 'computer'
elif human_choice == 'paper':
return 'human' if computer_choice == 'rock' else 'computer'
else:
return 'human' if computer_choice == 'paper' else 'computer'
def compose_output_message(result, human_choice, computer_choice):
if result == 'draw':
return 'Draw!'
elif result == 'human':
return f'Yes, {human_choice} beat {computer_choice}!'
else:
return f'Sorry, {computer_choice} beat {human_choice}'
def print_result(message):
print(message)
human_choice = get_human_choice()
computer_choice = get_computer_choice()
print_choices(human_choice, computer_choice)
game_result = eval_game_result(human_choice, computer_choice)
print_result(compose_output_message(game_result, human_choice, computer_choice))
|
flexible
|
{
"blob_id": "2e6bce05c8ba21aa322e306d2cdb8871531d7341",
"index": 5499,
"step-1": "<mask token>\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\n<mask token>\nprint_choices(human_choice, computer_choice)\n<mask token>\nprint_result(compose_output_message(game_result, human_choice, computer_choice)\n )\n",
"step-3": "<mask token>\nOPTIONS = ['rock', 'paper', 'scissors']\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\nhuman_choice = get_human_choice()\ncomputer_choice = get_computer_choice()\nprint_choices(human_choice, computer_choice)\ngame_result = eval_game_result(human_choice, computer_choice)\nprint_result(compose_output_message(game_result, human_choice, computer_choice)\n )\n",
"step-4": "import random\nOPTIONS = ['rock', 'paper', 'scissors']\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\nhuman_choice = get_human_choice()\ncomputer_choice = get_computer_choice()\nprint_choices(human_choice, computer_choice)\ngame_result = eval_game_result(human_choice, computer_choice)\nprint_result(compose_output_message(game_result, human_choice, computer_choice)\n )\n",
"step-5": null,
"step-ids": [
6,
7,
8,
9
]
}
|
[
6,
7,
8,
9
] |
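Because eval_game_result branches over all nine pairings, it is easy to verify exhaustively; a short self-check, assuming the functions above are in scope:

# Each choice beats exactly one other; equal choices are a draw.
BEATS = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}
for human in BEATS:
    for computer in BEATS:
        expected = ('draw' if human == computer
                    else 'human' if BEATS[human] == computer
                    else 'computer')
        assert eval_game_result(human, computer) == expected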
import os
import torch
from collections import OrderedDict
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image as mplimg
from torch.nn.functional import interpolate
import networks.deeplab_resnet as resnet
from mypath import Path
from dataloaders import helpers as helpers
from maskRCNN.maskrcnn_benchmark.config import cfg
from maskRCNN.demo.predictor_person import COCODemo
from skimage import io
PAD_SIZE = 10
def maskRCNN_model():
config_file = "/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.9,
)
return coco_demo
def get_maskRCNN_predictions(model, image_path):
image = io.imread(image_path)
predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)
return predictions, bbox, masks, heatmap
modelName = 'dextr_pascal-sbd'
pad = 50
thres = 0.8
gpu_id = 0
device = torch.device("cpu")
#device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")
# Create the network and load the weights
net = resnet.resnet101(1, nInputChannels=4, classifier='psp')
print("Initializing weights from: {}".format(os.path.join(Path.models_dir(), modelName + '.pth')))
state_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), modelName + '.pth'),
map_location=lambda storage, loc: storage)
# Remove the prefix .module from the model when it is trained using DataParallel
if 'module.' in list(state_dict_checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in state_dict_checkpoint.items():
name = k[7:] # remove `module.` from multi-gpu training
new_state_dict[name] = v
else:
new_state_dict = state_dict_checkpoint
net.load_state_dict(new_state_dict)
net.eval()
net.to(device)
# Read image and click the points
#plt.ion()
#plt.axis('off')
#plt.imshow(image)
#plt.title('Click the four extreme points of the objects\nHit enter when done (do not close the window)')
#results = []
def get_extreme_points(BBox):
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    x_min = int(BBox[0][0])
    y_min = int(BBox[0][1])
    x_max = int(BBox[0][2])
    y_max = int(BBox[0][3])
# Mid point
#top = np.array([(x_max-x_min)/2, y_min])
#bottom = np.array([(x_max-x_min)/2, y_max])
#left = np.array([x_min, (y_max-y_min)/2])
#right = np.array([x_max, (y_max-y_min)/2])
# Original
#top = np.array([x_min, y_min])
#bottom = np.array([x_max, y_max])
#left = np.array([x_min, y_max])
#right = np.array([x_max, y_min])
# Customized
top = np.array([x_min+(x_max-x_min)*0.5, y_min-PAD_SIZE])
bottom = np.array([x_min+(x_max-x_min)*0.5, y_max+PAD_SIZE])
left = np.array([x_min-PAD_SIZE, y_min+(y_max-y_min)*0.95])
right = np.array([x_max+PAD_SIZE, y_min+(y_max-y_min)*0.95])
    extreme_points = np.array([top, left, right, bottom]).astype(int)
return extreme_points
def get_EP_by_mask(mask):
mask = mask.squeeze()
idx = np.nonzero(mask)
left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]
right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]
top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]
bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0])+PAD_SIZE]
points = [top, left, right, bottom]
    points = np.array(points).astype(int)
return points
with torch.no_grad():
model = maskRCNN_model()
for path, dirs, files in os.walk("./ims/"):
for filename in files:
#extreme_points_ori = np.array(plt.ginput(4, timeout=0)).astype(np.int)
#extreme_points_ori = np.array(bbox).astype(np.int)
image_path = path + "/" + filename
image = np.array(Image.open(image_path))
# Get the mask for person from maskRCNN and compute the extreme points using the mask
_, _, mask, _ = get_maskRCNN_predictions(model, image_path)
extreme_points_ori = get_EP_by_mask(mask)
#extreme_points_ori = get_extreme_points(BBox)
#extreme_points_ori = np.array([[205,60],[3,450],[275,475],[560,470]]).astype(np.int)
# Crop image to the bounding box from the extreme points and resize
bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True)
crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)
resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32)
# Generate extreme point heat map normalized to image values
            extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])] + [pad, pad]
extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int)
extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10)
extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)
# Concatenate inputs and convert to tensor
input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)
inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...])
# Run a forward pass
inputs = inputs.to(device)
            outputs = net(inputs)
            # torch.nn.functional.upsample is deprecated; interpolate is equivalent.
            outputs = interpolate(outputs, size=(512, 512), mode='bilinear', align_corners=True)
outputs = outputs.to(torch.device('cpu'))
pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))
pred = 1 / (1 + np.exp(-pred))
pred = np.squeeze(pred)
result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres
#results.append(result)
results = result
# Plot the results
#plt.imshow(//helpers.overlay_masks(image / 255, results))
#plt.plot(extreme_points_ori[:, 0], extreme_points_ori[:, 1], 'gx')
out_img = helpers.overlay_masks(image / 255, results)
mplimg.imsave("./output/output_" + filename, out_img)
|
flexible
|
{
"blob_id": "2c8b8e9767ac8400fb6390e0851d9df10df7cd8c",
"index": 8729,
"step-1": "<mask token>\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\n<mask token>\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\n<mask token>\nprint('Initializing weights from: {}'.format(os.path.join(Path.models_dir(),\n modelName + '.pth')))\n<mask token>\nif 'module.' in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:]\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk('./ims/'):\n for filename in files:\n image_path = path + '/' + filename\n image = np.array(Image.open(image_path))\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=\n pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(\n np.float32)\n extreme_points = extreme_points_ori - [np.min(\n extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])\n ] + [pad, pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[\n 1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points,\n sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:,\n :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.\n newaxis, ...])\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear',\n align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[\n :2], zero_pad=True, relax=pad) > thres\n results = result\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave('./output/output_' + filename, 
out_img)\n",
"step-3": "<mask token>\nPAD_SIZE = 10\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\nmodelName = 'dextr_pascal-sbd'\npad = 50\nthres = 0.8\ngpu_id = 0\ndevice = torch.device('cpu')\nnet = resnet.resnet101(1, nInputChannels=4, classifier='psp')\nprint('Initializing weights from: {}'.format(os.path.join(Path.models_dir(),\n modelName + '.pth')))\nstate_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), \n modelName + '.pth'), map_location=lambda storage, loc: storage)\nif 'module.' in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:]\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk('./ims/'):\n for filename in files:\n image_path = path + '/' + filename\n image = np.array(Image.open(image_path))\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=\n pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(\n np.float32)\n extreme_points = extreme_points_ori - [np.min(\n extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])\n ] + [pad, pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[\n 1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points,\n sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:,\n :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.\n newaxis, ...])\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear',\n align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 
/ (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[\n :2], zero_pad=True, relax=pad) > thres\n results = result\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave('./output/output_' + filename, out_img)\n",
"step-4": "import os\nimport torch\nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as mplimg\nfrom torch.nn.functional import upsample\nimport networks.deeplab_resnet as resnet\nfrom mypath import Path\nfrom dataloaders import helpers as helpers\nfrom maskRCNN.maskrcnn_benchmark.config import cfg\nfrom maskRCNN.demo.predictor_person import COCODemo\nfrom skimage import io\nPAD_SIZE = 10\n\n\ndef maskRCNN_model():\n config_file = (\n '/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml'\n )\n cfg.merge_from_file(config_file)\n cfg.merge_from_list(['MODEL.DEVICE', 'cpu'])\n coco_demo = COCODemo(cfg, min_image_size=800, confidence_threshold=0.9)\n return coco_demo\n\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n return predictions, bbox, masks, heatmap\n\n\nmodelName = 'dextr_pascal-sbd'\npad = 50\nthres = 0.8\ngpu_id = 0\ndevice = torch.device('cpu')\nnet = resnet.resnet101(1, nInputChannels=4, classifier='psp')\nprint('Initializing weights from: {}'.format(os.path.join(Path.models_dir(),\n modelName + '.pth')))\nstate_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), \n modelName + '.pth'), map_location=lambda storage, loc: storage)\nif 'module.' in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:]\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])\n bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])\n left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n return points\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk('./ims/'):\n for filename in files:\n image_path = path + '/' + filename\n image = np.array(Image.open(image_path))\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=\n pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(\n np.float32)\n extreme_points = extreme_points_ori - [np.min(\n extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])\n ] + [pad, pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[\n 1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points,\n sigma=10)\n extreme_heatmap = 
helpers.cstm_normalize(extreme_heatmap, 255)\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:,\n :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.\n newaxis, ...])\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear',\n align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[\n :2], zero_pad=True, relax=pad) > thres\n results = result\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave('./output/output_' + filename, out_img)\n",
"step-5": "import os\nimport torch\nfrom collections import OrderedDict\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as mplimg\n\nfrom torch.nn.functional import upsample\n\nimport networks.deeplab_resnet as resnet\nfrom mypath import Path\nfrom dataloaders import helpers as helpers\n\nfrom maskRCNN.maskrcnn_benchmark.config import cfg\nfrom maskRCNN.demo.predictor_person import COCODemo\nfrom skimage import io\n\nPAD_SIZE = 10\n\n\ndef maskRCNN_model():\n config_file = \"/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml\"\n\n # update the config options with the config file\n cfg.merge_from_file(config_file)\n # manual override some options\n cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n\n coco_demo = COCODemo(\n cfg,\n min_image_size=800,\n confidence_threshold=0.9,\n )\n\n return coco_demo\n\ndef get_maskRCNN_predictions(model, image_path):\n image = io.imread(image_path)\n predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)\n\n return predictions, bbox, masks, heatmap\n\n\n\nmodelName = 'dextr_pascal-sbd'\npad = 50\nthres = 0.8\ngpu_id = 0\ndevice = torch.device(\"cpu\")\n#device = torch.device(\"cuda:\"+str(gpu_id) if torch.cuda.is_available() else \"cpu\")\n\n# Create the network and load the weights\nnet = resnet.resnet101(1, nInputChannels=4, classifier='psp')\nprint(\"Initializing weights from: {}\".format(os.path.join(Path.models_dir(), modelName + '.pth')))\nstate_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), modelName + '.pth'),\n map_location=lambda storage, loc: storage)\n# Remove the prefix .module from the model when it is trained using DataParallel\nif 'module.' in list(state_dict_checkpoint.keys())[0]:\n new_state_dict = OrderedDict()\n for k, v in state_dict_checkpoint.items():\n name = k[7:] # remove `module.` from multi-gpu training\n new_state_dict[name] = v\nelse:\n new_state_dict = state_dict_checkpoint\nnet.load_state_dict(new_state_dict)\nnet.eval()\nnet.to(device)\n\n# Read image and click the points\n#plt.ion()\n#plt.axis('off')\n#plt.imshow(image)\n#plt.title('Click the four extreme points of the objects\\nHit enter when done (do not close the window)')\n\n#results = []\n\ndef get_extreme_points(BBox):\n x_min = np.int(BBox[0][0])\n y_min = np.int(BBox[0][1])\n x_max = np.int(BBox[0][2])\n y_max = np.int(BBox[0][3])\n\n # Mid point\n #top = np.array([(x_max-x_min)/2, y_min])\n #bottom = np.array([(x_max-x_min)/2, y_max])\n #left = np.array([x_min, (y_max-y_min)/2])\n #right = np.array([x_max, (y_max-y_min)/2])\n\n # Original\n #top = np.array([x_min, y_min])\n #bottom = np.array([x_max, y_max])\n #left = np.array([x_min, y_max])\n #right = np.array([x_max, y_min])\n\n # Customized\n top = np.array([x_min+(x_max-x_min)*0.5, y_min-PAD_SIZE])\n bottom = np.array([x_min+(x_max-x_min)*0.5, y_max+PAD_SIZE])\n left = np.array([x_min-PAD_SIZE, y_min+(y_max-y_min)*0.95])\n right = np.array([x_max+PAD_SIZE, y_min+(y_max-y_min)*0.95])\n\n extreme_points = np.array([top, left, right, bottom]).astype(np.int)\n\n return extreme_points\n\n\ndef get_EP_by_mask(mask):\n mask = mask.squeeze()\n idx = np.nonzero(mask)\n\n left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]\n right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]\n top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]\n bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0])+PAD_SIZE]\n\n points = [top, left, right, bottom]\n points = np.array(points).astype(np.int)\n\n return 
points\n\n\n\nwith torch.no_grad():\n model = maskRCNN_model()\n for path, dirs, files in os.walk(\"./ims/\"):\n for filename in files:\n #extreme_points_ori = np.array(plt.ginput(4, timeout=0)).astype(np.int)\n #extreme_points_ori = np.array(bbox).astype(np.int)\n image_path = path + \"/\" + filename\n image = np.array(Image.open(image_path))\n\n # Get the mask for person from maskRCNN and compute the extreme points using the mask\n _, _, mask, _ = get_maskRCNN_predictions(model, image_path)\n extreme_points_ori = get_EP_by_mask(mask)\n\n #extreme_points_ori = get_extreme_points(BBox)\n #extreme_points_ori = np.array([[205,60],[3,450],[275,475],[560,470]]).astype(np.int)\n \n\n # Crop image to the bounding box from the extreme points and resize\n bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True)\n crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)\n resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32)\n\n # Generate extreme point heat map normalized to image values\n extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])] + [pad,\n pad]\n extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int)\n extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10)\n extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)\n\n # Concatenate inputs and convert to tensor\n input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)\n inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...])\n\n # Run a forward pass\n inputs = inputs.to(device)\n outputs = net.forward(inputs)\n outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True)\n outputs = outputs.to(torch.device('cpu'))\n\n pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))\n pred = 1 / (1 + np.exp(-pred))\n pred = np.squeeze(pred)\n result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres\n\n #results.append(result)\n results = result\n\n # Plot the results\n #plt.imshow(//helpers.overlay_masks(image / 255, results))\n #plt.plot(extreme_points_ori[:, 0], extreme_points_ori[:, 1], 'gx')\n out_img = helpers.overlay_masks(image / 255, results)\n mplimg.imsave(\"./output/output_\" + filename, out_img)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
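The least obvious step in the loop above is the four-channel input: RGB plus a heatmap built from the extreme points. Below is a minimal sketch of what helpers.make_gt is assumed to do (an isotropic Gaussian per point, combined by pointwise maximum; sigma=10 as in the call above):

import numpy as np

def extreme_point_heatmap(h, w, points, sigma=10.0):
    # points are (x, y) pixel coordinates of the top/left/right/bottom clicks.
    ys, xs = np.mgrid[0:h, 0:w]
    heat = np.zeros((h, w), dtype=np.float32)
    for x, y in points:
        g = np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2))
        heat = np.maximum(heat, g)
    return heat  # normalized to 0..255 before concatenation, as above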
# python2.7
# Formats clipboard lines into a quoted, comma-separated Oracle IN list
import pyperclip
text = str(pyperclip.paste()).strip()
lines = text.split('\n')
for i in range(len(lines)):
if (i+1) < len(lines):
lines[i] = str('\'')+str(lines[i]).replace("\r","").replace("\n","") + str('\',')
elif (i+1) == len(lines):
lines[i] = str('\'')+str(lines[i]).replace("\r","").replace("\n","")+ '\''
text = '(' + '\n'.join(lines) + ')'
pyperclip.copy(text)
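
# For reference: the same quoting logic applied to an in-memory list (the
# sample IDs below are made up for illustration), showing the Oracle
# IN-list shape the script produces.
sample = ['1001', '1002', '1003']
quoted = ["'" + s.replace('\r', '').replace('\n', '') + "'" for s in sample]
print('(' + ',\n'.join(quoted) + ')')
# prints:
# ('1001',
# '1002',
# '1003')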
|
flexible
|
{
"blob_id": "454fd88af552d7a46cb39167f21d641420973959",
"index": 2312,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\n<mask token>\npyperclip.copy(text)\n",
"step-3": "<mask token>\ntext = str(pyperclip.paste()).strip()\nlines = text.split('\\n')\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\ntext = '(' + '\\n'.join(lines) + ')'\npyperclip.copy(text)\n",
"step-4": "import pyperclip\ntext = str(pyperclip.paste()).strip()\nlines = text.split('\\n')\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\ntext = '(' + '\\n'.join(lines) + ')'\npyperclip.copy(text)\n",
"step-5": "# python2.7\r\n#formats for oracle lists\r\n\r\nimport pyperclip\r\ntext = str(pyperclip.paste()).strip()\r\n\r\nlines = text.split('\\n')\r\nfor i in range(len(lines)):\r\n if (i+1) < len(lines):\r\n lines[i] = str('\\'')+str(lines[i]).replace(\"\\r\",\"\").replace(\"\\n\",\"\") + str('\\',')\r\n elif (i+1) == len(lines):\r\n lines[i] = str('\\'')+str(lines[i]).replace(\"\\r\",\"\").replace(\"\\n\",\"\")+ '\\''\r\ntext = '(' + '\\n'.join(lines) + ')'\r\n\r\npyperclip.copy(text)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
elem = input('Enter a letter : (type quit to quit) ')
if elem.lower() != 'quit':
newList.append(elem)
else:
break
for item in newList:
if item not in noDuplicate:
noDuplicate.append(item)
print(noDuplicate)
<|reserved_special_token_1|>
newList = []
noDuplicate = []
while True:
elem = input('Enter a letter : (type quit to quit) ')
if elem.lower() != 'quit':
newList.append(elem)
else:
break
for item in newList:
if item not in noDuplicate:
noDuplicate.append(item)
print(noDuplicate)
<|reserved_special_token_1|>
newList = []
noDuplicate = []
while True:
elem = input("Enter a letter : (type quit to quit) ")
if elem.lower() != "quit":
newList.append(elem)
else:
break
for item in newList:
if item not in noDuplicate:
noDuplicate.append(item)
print(noDuplicate)
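
# For reference: the same order-preserving de-duplication in one step,
# using the fact that dicts keep insertion order (Python 3.7+):
sample = ['a', 'b', 'a', 'c', 'b']
print(list(dict.fromkeys(sample)))  # ['a', 'b', 'c']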
|
flexible
|
{
"blob_id": "7273592ab8fea10d9a3cde58690063690c74b746",
"index": 4635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n elem = input('Enter a letter : (type quit to quit) ')\n if elem.lower() != 'quit':\n newList.append(elem)\n else:\n break\nfor item in newList:\n if item not in noDuplicate:\n noDuplicate.append(item)\nprint(noDuplicate)\n",
"step-3": "newList = []\nnoDuplicate = []\nwhile True:\n elem = input('Enter a letter : (type quit to quit) ')\n if elem.lower() != 'quit':\n newList.append(elem)\n else:\n break\nfor item in newList:\n if item not in noDuplicate:\n noDuplicate.append(item)\nprint(noDuplicate)\n",
"step-4": "newList = []\nnoDuplicate = []\n\nwhile True:\n elem = input(\"Enter a letter : (type quit to quit) \")\n if elem.lower() != \"quit\":\n newList.append(elem)\n else:\n break\n\nfor item in newList:\n if item not in noDuplicate:\n noDuplicate.append(item)\n\nprint(noDuplicate)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.core.management.base import BaseCommand, CommandError
from tasks.redisqueue import RedisQueue
from django.conf import settings
class Command(BaseCommand):
    def handle(self, *args, **options):
        # Connect to the Redis instance named in settings and block in the
        # worker loop, processing queued jobs until the process is stopped.
        rqueue = RedisQueue(settings.REDIS_URL)
        rqueue.worker()
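
# manage.py discovers this command from its path under
# <app>/management/commands/<name>.py, so it runs as:
#
#   python manage.py <name>
#
# A minimal sketch of what the imported RedisQueue might look like; the
# real tasks.redisqueue is not shown, so the 'tasks' key name and the JSON
# job format below are assumptions (redis-py's from_url/blpop are real APIs):
#
# import json
# import redis
#
# class RedisQueue:
#     def __init__(self, url, key='tasks'):
#         self.conn = redis.from_url(url)
#         self.key = key
#
#     def worker(self):
#         while True:
#             _, raw = self.conn.blpop(self.key)  # block until a job arrives
#             job = json.loads(raw)
#             print('processing', job)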
|
normal
|
{
"blob_id": "cccf6ec50ae00d8e00a1a53ea06fa8b6d061b72e",
"index": 8258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n rqueue = RedisQueue(settings.REDIS_URL)\n rqueue.worker()\n",
"step-4": "from django.core.management.base import BaseCommand, CommandError\nfrom tasks.redisqueue import RedisQueue\nfrom django.conf import settings\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n rqueue = RedisQueue(settings.REDIS_URL)\n rqueue.worker()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
__author__ = 'tqs'
from win32com.client import Dispatch
import win32com.client
import time
import os
import re
import win32api
'''
Windows operations section:
Exam knowledge points covered:
1. Deleting files and folders
2. Copying files and folders
3. Moving files and folders
4. Renaming files and folders
5. File attributes
Sample exam items (kept in Chinese -- the parsers match Chinese keywords):
1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。 (create a subfolder)
2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。 (move a file)
3、设置“螺旋藻.aaa”文件属性为“只读”。 (set the read-only attribute)
4、在桌面上建立“绿色植物”的快捷方式。 (create a desktop shortcut)
'''
class WinOperation:
    def __init__(self):
        self.sourcePath = ''
        self.destPath = ''
        self.destFilename = ''
        self.sourceFilename = ''
    def dele(self, destFilename):  # delete a file or folder
        print('Deleting file', destFilename)
    def rename(self, sourceFilename, destFilename):  # rename a file
        print(sourceFilename, 'renamed to', destFilename)
    def mov(self, sourceFilename, destFilename):  # move a file
        print(sourceFilename, 'moved to', destFilename)
    def copy(self, sourceFilename, destFilename):  # copy a file
        print(sourceFilename, 'copied to', destFilename)
    def prop(self, destFilename):  # set file attributes
        print('File attributes of', destFilename)
    def realSourceFilename(self, sourcePath, sourceFilename):
        # stub: should resolve sourceFilename to its full path under sourcePath
        return sourceFilename
    def realdestFilename(self, destPath, destFilename):
        # stub: should resolve destFilename to its full path under destPath
        return destFilename
    def judgeNew(self, OperStr):  # parse a "create file/folder" item
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source name
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target name
        print(dest)
    def judgeDele(self, OperStr):  # parse a "delete file" item
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
    def judgeRename(self, OperStr):  # parse a "rename file" item
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source name
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target name
        print(dest)
    def judgeMov(self, OperStr):  # parse a "move file" item
        # Parses sentences such as 将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。
        # into a (source, destination) pair.
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source name
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target name
        print(dest)
        # The full paths still need to be looked up on disk
        sourceFilename = self.realSourceFilename(r"d:\zrexam\windows", source)
        destFilename = self.realdestFilename(r"d:\zrexam\windows", dest)
        self.mov(sourceFilename, destFilename)
    def judgeCopy(self, OperStr):  # parse a "copy file" item
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source name
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target name
        print(dest)
    def judgeProp(self, OperStr):  # parse a "file attributes" item
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source name
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target name
        print(dest)
##        win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)
##        win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)
    def judgeOperFromList(self, OperStrList):  # dispatch each sub-item to its operation
        for item in OperStrList:
            pass
    def getOperStrListFromFile(self, filename):  # read the sub-items of a paper into a list
        pass
    def judgeOperFromStr(self, OperStr):  # choose the operation from the item text
        if OperStr.find("新建") != -1:      # "create"
            print("Entering create operation")
            self.judgeNew(OperStr)
            print("Finished create operation")
        if OperStr.find("删除") != -1:      # "delete"
            print("Entering delete operation")
            self.judgeDele(OperStr)
            print("Finished delete operation")
        if OperStr.find("复制") != -1:      # "copy"
            print("Entering copy operation")
            self.judgeCopy(OperStr)
            print("Finished copy operation")
        if OperStr.find("移动") != -1:      # "move"
            print("Entering move operation")
            self.judgeMov(OperStr)
            print("Finished move operation")
        if OperStr.find("改名") != -1:      # "rename"
            print("Entering rename operation")
            self.judgeRename(OperStr)
            print("Finished rename operation")
        if OperStr.find("属性") != -1:      # "attributes"
            print("Entering attributes operation")
            self.judgeProp(OperStr)
            print("Finished attributes operation")
'''
Word operations section:
Exam knowledge points covered:
1. Fonts
2. Paragraphs
3. Find and replace
4. Inserting tables, WordArt, pictures
5. Page margins, columns
Sample exam items (kept in Chinese -- the parsers match Chinese keywords):
1. 将标题“师恩难忘”设置为黑体,居中对齐。 (title font, centered)
2. 将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。 (first-line indent)
3. 将文中所有的“田老师”替换为“田先生”。 (replace text)
4. 设置页边距为上下各2.5厘米(应用于整篇文档)。 (page margins)
5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。 (insert WordArt)
'''
class WordOperation:
    def __init__(self, filename=None):  # open a file, or start a new one if none is given
        self.wordApp = win32com.client.Dispatch('Word.Application')
        if filename:
            self.filename = filename
        else:
            self.filename = ''
    def save(self, newfilename=None):  # save the document
        # stub: only records the new name; the actual SaveAs call is not implemented
        if newfilename:
            self.filename = newfilename
        else:
            pass
    def close(self):  # close the document (drops the COM reference)
        del self.wordApp
    def fontOper(self):
        pass
    def replaceOper(self, source, dest):
        pass
    def insertOper(self, style):
        pass
    def pageOper(self):
        pass
    def paragraphOper(self):
        pass
    def judgePage(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeFont(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeReplace(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeInsert(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeParagraph(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeOperFromStr(self, OperStr):  # choose the operation from the item text
        if OperStr.find("标题") != -1 or OperStr.find("黑体") != -1 or OperStr.find("居中对齐") != -1:  # title / bold face / centered
            print("Entering font operation")
            self.judgeFont(OperStr)
            print("Finished font operation")
        elif OperStr.find("首行缩进") != -1 or OperStr.find("行距") != -1:  # first-line indent / line spacing
            print("Entering paragraph operation")
            self.judgeParagraph(OperStr)
            print("Finished paragraph operation")
        elif OperStr.find("插入") != -1:  # insert
            print("Entering insert operation")
            self.judgeInsert(OperStr)
            print("Finished insert operation")
        elif OperStr.find("页边距") != -1:  # page margins
            print("Entering margin operation")
            self.judgePage(OperStr)
            print("Finished margin operation")
        elif OperStr.find("分栏") != -1:  # columns (routed through judgeFont here)
            print("Entering column operation")
            self.judgeFont(OperStr)
            print("Finished column operation")
        elif OperStr.find("替换") != -1:  # replace
            print("Entering replace operation")
            self.judgeReplace(OperStr)
            print("Finished replace operation")
'''
Excel operations section:
Exam knowledge points covered:
1. Row height and column width
2. Cell formatting
3. Formulas and functions
4. Sorting
5. Inserting charts
Sample exam items (kept in Chinese -- the parsers match Chinese keywords):
1. 将A2所在行的行高设置为30(40像素)。 (row height)
2. 根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。 (formula)
3. 给A2:F8单元格区域加所有框线。 (all borders)
4. 按“无人机社团人数”由高到低排序。 (sort descending)
5. 选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。 (insert chart)
'''
class ExcelOperation:
    def __init__(self, filename=None):  # open a workbook, or create one if none is given
        self.xlApp = win32com.client.Dispatch('Excel.Application')
        if filename:
            self.filename = filename
            self.xlBook = self.xlApp.Workbooks.Open(filename)
        else:
            self.xlBook = self.xlApp.Workbooks.Add()
            self.filename = ''
    def save(self, newfilename=None):  # save the workbook
        if newfilename:
            self.filename = newfilename
            self.xlBook.SaveAs(newfilename)
        else:
            self.xlBook.Save()
    def close(self):  # close the workbook without saving
        self.xlBook.Close(SaveChanges=0)
        del self.xlApp
    def getCell(self, sheet, row, col):  # read a cell
        "Get value of one cell"
        sht = self.xlBook.Worksheets(sheet)
        return sht.Cells(row, col).Value
    def setCell(self, sheet, row, col, value):  # write a cell
        "set value of one cell"
        sht = self.xlBook.Worksheets(sheet)
        sht.Cells(row, col).Value = value
    def setCellformat(self, sheet, row, col):  # format a cell
        "set format of one cell"
        sht = self.xlBook.Worksheets(sheet)
        sht.Cells(row, col).Font.Size = 15           # font size
        sht.Cells(row, col).Font.Bold = True         # bold
        sht.Cells(row, col).Font.Name = "Arial"      # font name
        sht.Cells(row, col).Interior.ColorIndex = 3  # cell background
        #sht.Range("A1").Borders.LineStyle = xlDouble
        sht.Cells(row, col).BorderAround(1,4)        # cell border
        sht.Rows(3).RowHeight = 30                   # row height
        sht.Cells(row, col).HorizontalAlignment = -4108  # horizontally centered (xlHAlignCenter)
        sht.Cells(row, col).VerticalAlignment = -4160    # -4160 = xlVAlignTop
    def rowHeightOper(self, sheet, row, height):
        sht = self.xlBook.Worksheets(sheet)
        sht.Rows(row).RowHeight = height
    def deleteRow(self, sheet, row):
        sht = self.xlBook.Worksheets(sheet)
        sht.Rows(row).Delete()     # delete the row
        sht.Columns(row).Delete()  # ...and the column with the same index
    def getRange(self, sheet, row1, col1, row2, col2):  # read a block of cells
        "return a 2d array (i.e. tuple of tuples)"
        sht = self.xlBook.Worksheets(sheet)
        return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
    def addPicture(self, sheet, pictureName, Left, Top, Width, Height):  # insert a picture
        "Insert a picture in sheet"
        sht = self.xlBook.Worksheets(sheet)
        sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
    def cpSheet(self, before):  # copy a worksheet ('before' is unused; the copy lands after sheet 1)
        "copy sheet"
        shts = self.xlBook.Worksheets
        shts(1).Copy(None, shts(1))
    def judgeRowHeight(self, OperStr):  # row-height items
        print('Processing requirement:', OperStr)
    def judgeColWidth(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeFormula(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeFunction(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeSort(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeChart(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeBoxLine(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeOperFromStr(self, OperStr):  # choose the operation from the item text
        if OperStr.find("行高") != -1:      # row height
            print("Entering row-height operation")
            self.judgeRowHeight(OperStr)
            print("Finished row-height operation")
        if OperStr.find("列宽") != -1:      # column width
            print("Entering column-width operation")
            self.judgeColWidth(OperStr)
            print("Finished column-width operation")
        if OperStr.find("公式") != -1:      # formula
            print("Entering formula operation")
            self.judgeFormula(OperStr)
            print("Finished formula operation")
        if OperStr.find("函数") != -1:      # function
            print("Entering function operation")
            self.judgeFunction(OperStr)
            print("Finished function operation")
        if OperStr.find("所有框线") != -1:  # all borders
            print("Entering all-borders operation")
            self.judgeBoxLine(OperStr)
            print("Finished all-borders operation")
        if OperStr.find("排序") != -1:      # sort
            print("Entering sort operation")
            self.judgeSort(OperStr)
            print("Finished sort operation")
        if OperStr.find("图表") != -1:      # chart
            print("Entering chart operation")
            self.judgeChart(OperStr)
            print("Finished chart operation")
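
# Sketch of driving the Excel wrapper end-to-end (defined but not called;
# it needs a local Excel installation, and the workbook path and sheet
# name are hypothetical):
def demo_excel_oper(path=r'c:/test.xls'):
    xl = ExcelOperation(path)
    xl.rowHeightOper('Sheet1', 2, 30)  # sample item 1: row height 30
    xl.setCellformat('Sheet1', 2, 1)   # borders and fill, cf. sample item 3
    xl.save()
    xl.close()
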
'''
PowerPoint operations section:
1. Animation effects
2. Slide transitions
3. Hyperlinks
4. Backgrounds
5. Inserting pictures, sound, video
Sample exam items (kept in Chinese -- the parsers match Chinese keywords):
1. 在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。 (insert text box)
2. 将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。 (background fill)
3. 设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。 (transition)
4. 给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。 (animation)
5. 给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。 (hyperlink)
'''
class PptOperation:
    def __init__(self):
        pass
    def AnimationOper(self):
        pass
    def SwitchOper(self):
        pass
    def InsertOper(self, style):
        pass
    def BackgroundOper(self):
        pass
    def HyperlinkOper(self):
        pass
    def judgeAnimation(self, OperStr):
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source fragment
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target fragment
        print(dest)
    def judgeSwitch(self, OperStr):
        print('Processing requirement:', OperStr)
        pattern = re.compile('“(.*)”')
        print(pattern.findall(OperStr))
        strFile = str(pattern.findall(OperStr))
        file1 = strFile.split("”")
        source = file1[0][2:]  # extract the source fragment
        print(source)
        file2 = strFile.split("“")
        dest = file2[1][0:-2]  # extract the target fragment
        print(dest)
    def judgeInsert(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeBackground(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeHyperlink(self, OperStr):
        print('Processing requirement:', OperStr)
    def judgeOperFromStr(self, OperStr):  # choose the operation from the item text
        if OperStr.find("动画") != -1:      # animation
            print("Entering animation operation")
            self.judgeAnimation(OperStr)
            print("Finished animation operation")
        if OperStr.find("切换") != -1:      # transition
            print("Entering transition operation")
            self.judgeSwitch(OperStr)
            print("Finished transition operation")
        if OperStr.find("超级链接") != -1:  # hyperlink
            print("Entering hyperlink operation")
            self.judgeHyperlink(OperStr)
            print("Finished hyperlink operation")
        if OperStr.find("背景") != -1:      # background
            print("Entering background operation")
            self.judgeBackground(OperStr)
            print("Finished background operation")
        if OperStr.find("插入") != -1:      # insert
            print("Entering insert operation")
            self.judgeInsert(OperStr)
            print("Finished insert operation")
'''
Text-input operations section:
Exam knowledge points covered:
demonstration of calling COM objects:
class InputOperation:
'''
class OperationTypeJudge:
    def __init__(self):
        pass
    def getType(self, OperStr):  # route an item to the right application handler
        if OperStr.find("替换") != -1 or OperStr.find("首行缩进") != -1:    # replace / first-line indent
            print('This is a Word item')
            print('Handed over to the Word handler')
        elif OperStr.find("公式") != -1 or OperStr.find("函数") != -1:      # formula / function
            print('This is an Excel item')
            print('Handed over to the Excel handler')
        elif OperStr.find("切换") != -1 or OperStr.find("动画") != -1:      # transition / animation
            print('This is a PowerPoint item')
            print('Handed over to the PowerPoint handler')
        pass
    def getOperaPath(self):
        pass
    def getOperaFileName(self):
        pass
'''
Multiple-choice question section:
'''
class SelectOperation:
    def __init__(self):
        pass
    def getQuestionTxt(self, item):
        pass
    def getQuestionPic(self, item):
        pass
    def getAnswer(self, item):
        pass
    def getCorrectAnswer(self, item):
        pass

'''
True/false (judgement) question section:
'''
class JudgeOperation:
    def __init__(self):
        pass
    def getQuestionTxt(self, item):
        pass
    def getQuestionPic(self, item):
        pass
    def getAnswer(self, item):
        pass
    def getCorrectAnswer(self, item):
        pass
if __name__ == "__main__":
win=WinOperation()
win.judgeOperFromStr('1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。')
win.judgeOperFromStr('2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。')
win.judgeOperFromStr('3、设置“螺旋藻.aaa”文件属性为“只读”。')
win.judgeOperFromStr('4、在桌面上建立“绿色植物”的快捷方式。')
word=WordOperation()
word.judgeOperFromStr('1. 将标题“师恩难忘”设置为黑体,居中对齐。')
word.judgeOperFromStr('2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。')
word.judgeOperFromStr('3.将文中所有的“田老师”替换为“田先生”。')
word.judgeOperFromStr('4. 设置页边距为上下各2.5厘米(应用于整篇文档)。')
word.judgeOperFromStr('5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。')
excel=ExcelOperation(r'c:/test.xls')
excel.judgeOperFromStr('1.将A2所在行的行高设置为30(40像素)。')
excel.judgeOperFromStr('2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。')
excel.judgeOperFromStr('3.给A2:F8单元格区域加所有框线。')
excel.judgeOperFromStr('4.按“无人机社团人数”由高到低排序。')
excel.judgeOperFromStr('5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。')
ppt=PptOperation()
ppt.judgeOperFromStr('1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。')
ppt.judgeOperFromStr('2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。')
ppt.judgeOperFromStr('3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。')
ppt.judgeOperFromStr('4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。')
ppt.judgeOperFromStr('5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。')
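
# The judgeOperFromStr methods above are keyword if-chains; a table-driven
# variant (hypothetical refactor, sketched here as comments) would keep the
# keyword-to-handler mapping in data instead:
#
# handlers = {'行高': excel.judgeRowHeight, '排序': excel.judgeSort}
# for keyword, handler in handlers.items():
#     if keyword in item_text:
#         handler(item_text)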
|
normal
|
{
"blob_id": "b453006b4d4c5f17bb58110fe8197d7796ca0c6c",
"index": 467,
"step-1": "<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n <mask token>\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n <mask token>\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n <mask token>\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n <mask token>\n <mask token>\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n <mask token>\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, 
OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. 
tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n 
print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n else:\n pass\n\n def close(self):\n del self.wordApp\n\n def fontOper(self):\n pass\n\n def replaceOper(self, source, dest):\n pass\n\n def insertOper(self, style):\n pass\n\n def pageOper(self):\n pass\n\n def paragraphOper(self):\n pass\n\n def judgePage(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFont(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeReplace(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeParagraph(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. 
tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n 
print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass WinOperation:\n <mask token>\n <mask token>\n\n def rename(self, sourceFilename, destFilename):\n print(sourceFilename, '文件改名为', destFilename)\n pass\n\n def mov(self, sourceFilename, destFilename):\n print(sourceFilename, '移动文件为', destFilename)\n pass\n <mask token>\n <mask token>\n\n def realSourceFilename(self, soucePath, sourceFilename):\n return sourceFilename\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def judgeCopy(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n pass\n <mask token>\n\n def judgeOperFromList(self, OperStrList):\n for item in OperStrList:\n pass\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n else:\n pass\n\n def close(self):\n del self.wordApp\n\n def fontOper(self):\n pass\n\n def replaceOper(self, source, dest):\n pass\n\n def insertOper(self, style):\n pass\n\n def pageOper(self):\n pass\n\n def paragraphOper(self):\n pass\n\n def judgePage(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFont(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeReplace(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeParagraph(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('标题') != -1 or OperStr.find('黑体'\n ) != -1 or OperStr.find('居中对齐') != -1:\n print('进入字体操作')\n self.judgeFont(OperStr)\n print('结束字体')\n elif OperStr.find('首行缩进') != -1 or OperStr.find('行距') != -1:\n print('进入段落操作')\n self.judgeParagraph(OperStr)\n print('结束段落操作')\n elif OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n elif OperStr.find('页边距') != -1:\n print('进入页边距操作')\n self.judgePage(OperStr)\n print('结束页边距操作')\n elif OperStr.find('分栏') != -1:\n print('进入分栏操作')\n self.judgeFont(OperStr)\n print('结束分栏操作')\n elif OperStr.find('替换') != -1:\n print('进入替换操作')\n self.judgeReplace(OperStr)\n print('结束替换操作')\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 
'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n 
print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*-\n__author__ = 'tqs'\nfrom win32com.client import Dispatch \nimport win32com.client \nimport time\nimport os\nimport re\nimport win32api\n'''\nwindows操作部分说明:\n考试波及知识点:\n1.删除文件及文件夹\n2.复制文件及文件夹\n3.移动文件及文件夹\n4.文件及文件夹改名\n5.文件属性\n考试样例:\n1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。\n2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。\n3、设置“螺旋藻.aaa”文件属性为“只读”。\n4、在桌面上建立“绿色植物”的快捷方式。\n'''\nclass WinOperation:\n def __init__(self):\n self.soucePath = ''\n self.destPath = ''\n self.destFilename = ''\n self.sourceFilename = ''\n def dele(self,destFilename):#删除文件及文件夹\n print('删除文件',destFilename)\n pass\n def rename(self,sourceFilename,destFilename):#文件改名\n print(sourceFilename,'文件改名为',destFilename)\n pass\n def mov(self,sourceFilename,destFilename):#移动文件\n print(sourceFilename,'移动文件为',destFilename)\n pass\n def copy(self,sourceFilename,destFilename):#复制文件\n print(sourceFilename,'移动文件为',destFilename)\n pass\n def prop(self,destFilename):#文件属性\n print('文件属性',destFilename)\n pass\n def realSourceFilename(self,soucePath,sourceFilename):\n return sourceFilename\n def realdestFilename(self,destPath,destFilename):\n return destFilename\n def judgeNew(self,OperStr):#从文本中判断新建文件或文件夹\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeDele(self,OperStr):#从文本中判断删除文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n pass\n def judgeRename(self,OperStr):#从文本中判断重命名文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeMov(self,OperStr):#从文本中判断移动文件\n #形如将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。这种结构的解析\n #解析为源文件,目标文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n #需要再取得完整路径,需要查找\n sourceFilename=self.realSourceFilename(\"d:\\zrexam\\windows\",source)\n destFilename=self.realdestFilename(\"d:\\zrexam\\windows\",dest)\n self.mov(sourceFilename,destFilename)\n def judgeCopy(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeProp(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)\n## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)\n pass\n def judgeOperFromList(self,OperStrList):#根据各小题选择对应的操作\n for item in OperStrList:\n pass\n def getOperStrListFromFile(self,filename):#从文件中将各小题放入列表 \n pass\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"新建\") !=-1:\n print(\"进入新建操作\")\n 
self.judgeNew(OperStr)\n print(\"结束新建操作\")\n if OperStr.find(\"删除\") !=-1:\n print(\"进入删除操作\")\n self.judgeDele(OperStr)\n print(\"结束删除操作\")\n if OperStr.find(\"复制\") !=-1:\n print(\"进入复制操作\")\n self.judgeCopy(OperStr)\n print(\"结束复制操作\")\n if OperStr.find(\"移动\") !=-1:\n print(\"进入移动操作\")\n self.judgeMov(OperStr)\n print(\"结束移动操作\")\n if OperStr.find(\"改名\") !=-1:\n print(\"进入改名操作\")\n self.judgeRename(OperStr)\n print(\"结束改名操作\")\n if OperStr.find(\"属性\") !=-1:\n print(\"进入属性操作\")\n self.judgeProp(OperStr)\n print(\"结束属性操作\")\n \n'''\nword操作部分说明:\n考试波及知识点:\n1.字体\n2.段落\n3.查找替换\n4.插入 表格,艺术字,图片\n5.页边距,分栏\n\n1. 将标题“师恩难忘”设置为黑体,居中对齐。\n2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。\n3.将文中所有的“田老师”替换为“田先生”。\n4. 设置页边距为上下各2.5厘米(应用于整篇文档)。\n5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。\n考试样例:\n'''\nclass WordOperation:\n def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)\n self.wordApp = win32com.client.Dispatch('Word.Application') \n if filename: \n self.filename = filename\n else:\n self.filename = ''\n def save(self, newfilename=None): #保存文件\n if newfilename: \n self.filename = newfilename\n else:\n pass \n def close(self): #关闭文件\n del self.wordApp \n def fontOper(self): \n pass\n def replaceOper(self,source,dest):\n pass\n def insertOper(self,style):\n pass\n def pageOper(self):\n pass\n def paragraphOper(self):\n pass\n def judgePage(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFont(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeReplace(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeInsert(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeParagraph(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"标题\") !=-1 or OperStr.find(\"黑体\") !=-1 or OperStr.find(\"居中对齐\") !=-1:\n print(\"进入字体操作\")\n self.judgeFont(OperStr)\n print(\"结束字体\")\n elif OperStr.find(\"首行缩进\") !=-1 or OperStr.find(\"行距\") !=-1:\n print(\"进入段落操作\")\n self.judgeParagraph(OperStr) \n print(\"结束段落操作\")\n elif OperStr.find(\"插入\") !=-1:\n print(\"进入插入操作\")\n self.judgeInsert(OperStr)\n print(\"结束插入操作\")\n elif OperStr.find(\"页边距\") !=-1:\n print(\"进入页边距操作\")\n self.judgePage(OperStr)\n print(\"结束页边距操作\")\n elif OperStr.find(\"分栏\") !=-1:\n print(\"进入分栏操作\")\n self.judgeFont(OperStr)\n print(\"结束分栏操作\")\n elif OperStr.find(\"替换\") !=-1:\n print(\"进入替换操作\")\n self.judgeReplace(OperStr)\n print(\"结束替换操作\")\n \n'''\nExcel操作部分说明:\n考试波及知识点:\n1.行高列宽\n2.格式相关\n3.公式函数\n4.排序\n5.插入图表\n\n考试样例:\n1.将A2所在行的行高设置为30(40像素)。\n2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。\n3.给A2:F8单元格区域加所有框线。\n4.按“无人机社团人数”由高到低排序。\n5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。\n\n'''\nclass ExcelOperation:\n def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)\n self.xlApp = win32com.client.Dispatch('Excel.Application') \n if filename: \n self.filename = filename \n self.xlBook = self.xlApp.Workbooks.Open(filename) \n else: \n self.xlBook = self.xlApp.Workbooks.Add() \n self.filename = ''\n def save(self, newfilename=None): #保存文件\n if newfilename: \n self.filename = newfilename \n self.xlBook.SaveAs(newfilename) \n else: \n self.xlBook.Save() \n def close(self): #关闭文件\n self.xlBook.Close(SaveChanges=0) \n del self.xlApp \n def getCell(self, sheet, row, col): #获取单元格的数据\n \"Get value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n return sht.Cells(row, col).Value \n def setCell(self, sheet, row, col, value): #设置单元格的数据\n \"set value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Cells(row, col).Value = value\n def setCellformat(self, sheet, row, col): #设置单元格的数据\n \"set value of one cell\" \n 
sht = self.xlBook.Worksheets(sheet) \n sht.Cells(row, col).Font.Size = 15#字体大小\n sht.Cells(row, col).Font.Bold = True#是否黑体\n sht.Cells(row, col).Font.Name = \"Arial\"#字体类型\n sht.Cells(row, col).Interior.ColorIndex = 3#表格背景\n #sht.Range(\"A1\").Borders.LineStyle = xlDouble\n sht.Cells(row, col).BorderAround(1,4)#表格边框\n sht.Rows(3).RowHeight = 30#行高\n sht.Cells(row, col).HorizontalAlignment = -4131 #水平居中xlCenter\n sht.Cells(row, col).VerticalAlignment = -4160 #\n def rowHeightOper(self,sheet,row,height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height \n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()#删除行\n sht.Columns(row).Delete()#删除列\n def getRange(self, sheet, row1, col1, row2, col2): #获得一块区域的数据,返回为一个二维元组\n \"return a 2d array (i.e. tuple of tuples)\" \n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value \n def addPicture(self, sheet, pictureName, Left, Top, Width, Height): #插入图片\n \"Insert a picture in sheet\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n def cpSheet(self, before): #复制工作表\n \"copy sheet\" \n shts = self.xlBook.Worksheets \n shts(1).Copy(None,shts(1))\n def judgeRowHeight(self,OperStr):#行高操作\n print('正在完成要求',OperStr)\n def judgeColWidth(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFormula(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFunction(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeSort(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeChart(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeBoxLine(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"行高\") !=-1:\n print(\"进入行高操作\")\n self.judgeRowHeight(OperStr)\n print(\"结束行高操作\")\n if OperStr.find(\"列宽\") !=-1:\n print(\"进入列宽操作\")\n self.judgeColWidth(OperStr)\n print(\"结束列宽操作\")\n if OperStr.find(\"公式\") !=-1:\n print(\"进入公式操作\")\n self.judgeFormula(OperStr)\n print(\"结束公式操作\")\n if OperStr.find(\"函数\") !=-1:\n print(\"进入函数操作\")\n self.judgeFunction(OperStr)\n print(\"结束函数操作\")\n if OperStr.find(\"所有框线\") !=-1:\n print(\"进入所有框线操作\")\n self.judgeBoxLine(OperStr)\n print(\"结束所有框线操作\")\n if OperStr.find(\"排序\") !=-1:\n print(\"进入排序操作\")\n self.judgeSort(OperStr)\n print(\"结束排序操作\")\n if OperStr.find(\"图表\") !=-1:\n print(\"进入图表操作\")\n self.judgeChart(OperStr)\n print(\"结束图表操作\")\n pass\n \n'''\nPPT操作部分说明:\n1.动画效果\n2.切换效果\n3.超级链接\n4.背景\n5.插入,图片,声音,视频\n\n考试样例:\n1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。\n2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。\n3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。\n4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。\n5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。\n'''\n\nclass PptOperation:\n def __init__(self):\n pass\n def AnimationOper(self):\n pass\n def SwitchOper(self):\n pass\n def InsertOper(self,style):\n pass\n def BackgroundOper(self):\n pass\n def HyperlinkOper(self):\n pass\n def judgeAnimation(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n def judgeSwitch(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n 
file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n def judgeInsert(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeBackground(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeHyperlink(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n \n if OperStr.find(\"动画\") !=-1:\n print(\"进入动画操作\")\n self.judgeAnimation(OperStr)\n print(\"结束动画操作\")\n if OperStr.find(\"切换\") !=-1:\n print(\"进入切换操作\")\n self.judgeSwitch(OperStr)\n print(\"结束切换操作\")\n if OperStr.find(\"超级链接\") !=-1:\n print(\"进入超级链接操作\")\n self.judgeHyperlink(OperStr)\n print(\"结束超级链接操作\")\n if OperStr.find(\"背景\") !=-1:\n print(\"进入背景操作\")\n self.judgeBackground(OperStr)\n print(\"结束背景操作\")\n if OperStr.find(\"插入\") !=-1:\n print(\"进入插入操作\")\n self.judgeInsert(OperStr)\n print(\"结束插入操作\")\n \n'''\nInput文字录入操作部分说明:\n考试波及知识点:\ncom对象的调用演示:\nclass InputOperation:\n'''\nclass OperationTypeJudge:\n def __init__(self):\n pass\n def getType(self,OperStr):\n if OperStr.find(\"替换\") !=-1 or OperStr.find(\"首行缩进\") !=-1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find(\"公式\") !=-1 or OperStr.find(\"函数\") !=-1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find(\"切换\") !=-1 or OperStr.find(\"动画\") !=-1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n def getOperaPath(self):\n pass\n def getOperaFileName(self):\n pass\n'''\n选择题部分说明:\n''' \nclass SelectOperation: \n def __init__(self):\n pass \n def getQusetionTxt(self,item):\n pass\n def getQusetionPic(self,item):\n pass\n def getAnswer(self,item):\n pass\n def getCorrectAnswer(self,item):\n pass\n \n'''\n判断题部分说明:\n''' \nclass JudgeOperation: \n def __init__(self):\n pass \n def getQusetionTxt(self,item):\n pass\n def getQusetionPic(self,item):\n pass\n def getAnswer(self,item):\n pass\n def getCorrectAnswer(self,item):\n pass \nif __name__ == \"__main__\":\n win=WinOperation()\n win.judgeOperFromStr('1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。')\n win.judgeOperFromStr('2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。')\n win.judgeOperFromStr('3、设置“螺旋藻.aaa”文件属性为“只读”。')\n win.judgeOperFromStr('4、在桌面上建立“绿色植物”的快捷方式。')\n\n word=WordOperation()\n word.judgeOperFromStr('1. 将标题“师恩难忘”设置为黑体,居中对齐。')\n word.judgeOperFromStr('2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。')\n word.judgeOperFromStr('3.将文中所有的“田老师”替换为“田先生”。')\n word.judgeOperFromStr('4. 设置页边距为上下各2.5厘米(应用于整篇文档)。')\n word.judgeOperFromStr('5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。')\n\n excel=ExcelOperation(r'c:/test.xls')\n excel.judgeOperFromStr('1.将A2所在行的行高设置为30(40像素)。')\n excel.judgeOperFromStr('2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。')\n excel.judgeOperFromStr('3.给A2:F8单元格区域加所有框线。')\n excel.judgeOperFromStr('4.按“无人机社团人数”由高到低排序。')\n excel.judgeOperFromStr('5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。')\n\n ppt=PptOperation()\n ppt.judgeOperFromStr('1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。')\n ppt.judgeOperFromStr('2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。')\n ppt.judgeOperFromStr('3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。')\n ppt.judgeOperFromStr('4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。')\n ppt.judgeOperFromStr('5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。')\n",
"step-ids": [
42,
52,
64,
71,
87
]
}
|
[
42,
52,
64,
71,
87
] |
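An illustrative aside (not part of the record above): the judge* methods extract quoted terms from a prompt with the greedy pattern '“(.*)”' and then split on the quote marks to recover each piece; a non-greedy group captures the terms directly. A minimal sketch, reusing one of the record's own sample prompts:

import re

prompt = '3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。'
# greedy, as in judgeAnimation: one span from the first “ to the last ”
print(re.findall('“(.*)”', prompt))    # ['推进”,声音为“鼓掌'] -- hence the split step
# non-greedy alternative: each quoted term is captured on its own
print(re.findall('“(.*?)”', prompt))   # ['推进', '鼓掌']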
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class face_verifier:
<|reserved_special_token_0|>
def verify_person(self, f1, f2):
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7:
return 0
elif sim > 0.5:
return 1
else:
return 2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class face_verifier:
def __init__(self, net_depth=50, drop_ratio=0.6, net_mode='ir_se',
device='cuda'):
self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)
save_path = 'face_recognition/model_ir_se50.pth'
self.model.load_state_dict(torch.load(save_path))
self.model.eval()
def verify_person(self, f1, f2):
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7:
return 0
elif sim > 0.5:
return 1
else:
return 2
<|reserved_special_token_1|>
from face_recognition.model import Backbone
import torch
import numpy
class face_verifier:
def __init__(self, net_depth=50, drop_ratio=0.6, net_mode='ir_se',
device='cuda'):
self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)
save_path = 'face_recognition/model_ir_se50.pth'
self.model.load_state_dict(torch.load(save_path))
self.model.eval()
def verify_person(self, f1, f2):
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7:
return 0
elif sim > 0.5:
return 1
else:
return 2
<|reserved_special_token_1|>
from face_recognition.model import Backbone
import torch
import numpy
class face_verifier:
def __init__(self, net_depth=50, drop_ratio=0.6, net_mode="ir_se", device="cuda"):
# create model
self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)
save_path = "face_recognition/model_ir_se50.pth"
# load model
self.model.load_state_dict(torch.load(save_path))
self.model.eval()
def verify_person(self, f1, f2):
# 0: same / 1: ambiguous / 2: different
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7: # same
return 0
elif sim > 0.5: # ambiguous
return 1
else:
return 2
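
# Minimal usage sketch (an addition, not part of the original file). Assumes
# f1/f2 are aligned face crops preprocessed to the 1x3x112x112 shape commonly
# used with an ir_se50 backbone, and that a CUDA device is available, since
# verify_person moves the batch to the GPU internally.
if __name__ == "__main__":
    verifier = face_verifier()            # loads face_recognition/model_ir_se50.pth
    f1 = torch.randn(1, 3, 112, 112)      # random stand-ins for two face crops
    f2 = torch.randn(1, 3, 112, 112)
    labels = {0: "same", 1: "ambiguous", 2: "different"}
    print(labels[verifier.verify_person(f1, f2)])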
|
flexible
|
{
"blob_id": "0659df48bb150582917e333a7a25d2d25395dfda",
"index": 1381,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass face_verifier:\n <mask token>\n\n def verify_person(self, f1, f2):\n batch_tensor = torch.cat([f1, f2], 0)\n output_feat = self.model(batch_tensor.cuda())\n sim = torch.nn.CosineSimilarity(dim=0)\n sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()\n if sim > 0.7:\n return 0\n elif sim > 0.5:\n return 1\n else:\n return 2\n",
"step-3": "<mask token>\n\n\nclass face_verifier:\n\n def __init__(self, net_depth=50, drop_ratio=0.6, net_mode='ir_se',\n device='cuda'):\n self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)\n save_path = 'face_recognition/model_ir_se50.pth'\n self.model.load_state_dict(torch.load(save_path))\n self.model.eval()\n\n def verify_person(self, f1, f2):\n batch_tensor = torch.cat([f1, f2], 0)\n output_feat = self.model(batch_tensor.cuda())\n sim = torch.nn.CosineSimilarity(dim=0)\n sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()\n if sim > 0.7:\n return 0\n elif sim > 0.5:\n return 1\n else:\n return 2\n",
"step-4": "from face_recognition.model import Backbone\nimport torch\nimport numpy\n\n\nclass face_verifier:\n\n def __init__(self, net_depth=50, drop_ratio=0.6, net_mode='ir_se',\n device='cuda'):\n self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)\n save_path = 'face_recognition/model_ir_se50.pth'\n self.model.load_state_dict(torch.load(save_path))\n self.model.eval()\n\n def verify_person(self, f1, f2):\n batch_tensor = torch.cat([f1, f2], 0)\n output_feat = self.model(batch_tensor.cuda())\n sim = torch.nn.CosineSimilarity(dim=0)\n sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()\n if sim > 0.7:\n return 0\n elif sim > 0.5:\n return 1\n else:\n return 2\n",
"step-5": "from face_recognition.model import Backbone\r\nimport torch\r\nimport numpy\r\n\r\nclass face_verifier():\r\n def __init__(self, net_depth=50, drop_ratio=0.6, net_mode=\"ir_se\", device=\"cuda\"):\r\n # create model\r\n self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)\r\n save_path = \"face_recognition/model_ir_se50.pth\"\r\n # load model\r\n self.model.load_state_dict(torch.load(save_path))\r\n self.model.eval()\r\n\r\n def verify_person(self, f1, f2):\r\n # 0: same / 1: ambiguous / 2: different\r\n batch_tensor = torch.cat([f1, f2], 0)\r\n output_feat = self.model(batch_tensor.cuda())\r\n sim = torch.nn.CosineSimilarity(dim=0)\r\n sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()\r\n if sim > 0.7: # same\r\n return 0\r\n elif sim > 0.5: # ambiguous\r\n return 1\r\n else:\r\n return 2\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def EPGAD(ReportPath, Hi=None, GUi=None):
    if Hi is None:
Hi = sha256(str(random()).encode()).hexdigest()
jsn = open(ReportPath, 'rt').read()
jsnld = loads(jsn)
print('Report Loaded')
print('Finding Subject Information')
if 'subject' in jsnld.keys():
print('Subject Information Found')
if 'display' in jsnld['subject'].keys():
jsnld['subject']['display'] = ''
print('Subject Display Found and Suppressed')
if 'reference' in jsnld['subject'].keys():
jsnld['subject']['reference'] = Hi
print('Replacing Identifier with ', Hi)
print('Placing Record Asset on BlockChain')
print()
txid = putonBlockChain(jsnld, Hi, GUi)
print('Status OK. Retrieving Transaction')
findRecord(txid)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def EPGAinit(IDPath):
idt = open(IDPath, 'rt').read()
Qti = etree.fromstring(idt)
print('Loading Identifiers')
print('Quasi Specifiers..')
print(', '.join(Qti.keys()))
print('Applying EPGAD_Init on Qti')
gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.
values())]
hmacKey = ''
for i in gQti:
hmacKey += i
Gi = sha256(hmacKey.encode()).hexdigest()
countObj = getLast(Gi)
GiObj = pickle.loads(countObj.GiObj)
    if GiObj['cQueue'].empty():
        if 'count' not in GiObj.keys():
            GiObj['count'] = 0
            count = 0
        else:
            GiObj['count'] += 1
            count = GiObj['count']
        countObj.GiObj = pickle.dumps(GiObj)
        saveCount(countObj)
    else:
        # assumed semantics: reuse a counter released back onto the queue;
        # the original left `count` unbound (NameError) on this branch
        count = GiObj['cQueue'].get()
prime = 179426549
if count >= prime:
raise Exception('Prime Exceeded')
else:
res = count ** 2 % prime
if count <= prime / 2:
GUi = res
else:
GUi = prime - res
Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256
).hexdigest()
return Hi, GUi
def EPGAD(ReportPath, Hi=None, GUi=None):
    if Hi is None:
Hi = sha256(str(random()).encode()).hexdigest()
jsn = open(ReportPath, 'rt').read()
jsnld = loads(jsn)
print('Report Loaded')
print('Finding Subject Information')
if 'subject' in jsnld.keys():
print('Subject Information Found')
if 'display' in jsnld['subject'].keys():
jsnld['subject']['display'] = ''
print('Subject Display Found and Suppressed')
if 'reference' in jsnld['subject'].keys():
jsnld['subject']['reference'] = Hi
print('Replacing Identifier with ', Hi)
print('Placing Record Asset on BlockChain')
print()
txid = putonBlockChain(jsnld, Hi, GUi)
print('Status OK. Retrieving Transaction')
findRecord(txid)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def EPGAinit(IDPath):
idt = open(IDPath, 'rt').read()
Qti = etree.fromstring(idt)
print('Loading Identifiers')
print('Quasi Specifiers..')
print(', '.join(Qti.keys()))
print('Applying EPGAD_Init on Qti')
gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.
values())]
hmacKey = ''
for i in gQti:
hmacKey += i
Gi = sha256(hmacKey.encode()).hexdigest()
countObj = getLast(Gi)
GiObj = pickle.loads(countObj.GiObj)
    if GiObj['cQueue'].empty():
        if 'count' not in GiObj.keys():
            GiObj['count'] = 0
            count = 0
        else:
            GiObj['count'] += 1
            count = GiObj['count']
        countObj.GiObj = pickle.dumps(GiObj)
        saveCount(countObj)
    else:
        # assumed semantics: reuse a counter released back onto the queue;
        # the original left `count` unbound (NameError) on this branch
        count = GiObj['cQueue'].get()
prime = 179426549
if count >= prime:
raise Exception('Prime Exceeded')
else:
res = count ** 2 % prime
if count <= prime / 2:
GUi = res
else:
GUi = prime - res
Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256
).hexdigest()
return Hi, GUi
def EPGAD(ReportPath, Hi=None, GUi=None):
    if Hi is None:
Hi = sha256(str(random()).encode()).hexdigest()
jsn = open(ReportPath, 'rt').read()
jsnld = loads(jsn)
print('Report Loaded')
print('Finding Subject Information')
if 'subject' in jsnld.keys():
print('Subject Information Found')
if 'display' in jsnld['subject'].keys():
jsnld['subject']['display'] = ''
print('Subject Display Found and Suppressed')
if 'reference' in jsnld['subject'].keys():
jsnld['subject']['reference'] = Hi
print('Replacing Identifier with ', Hi)
print('Placing Record Asset on BlockChain')
print()
txid = putonBlockChain(jsnld, Hi, GUi)
print('Status OK. Retrieving Transaction')
findRecord(txid)
if __name__ == '__main__':
Hi, GUi = EPGAinit('sampleIdentity.xml')
EPGAD('sampleReport.json', Hi, GUi)
<|reserved_special_token_1|>
from lxml import etree
from utils import generalize_or_supress
from hashlib import sha256
from count import getLast, saveCount
import pickle
from hmac import new
from random import random
from json import loads
from bigchain import putonBlockChain, findRecord
def EPGAinit(IDPath):
idt = open(IDPath, 'rt').read()
Qti = etree.fromstring(idt)
print('Loading Identifiers')
print('Quasi Specifiers..')
print(', '.join(Qti.keys()))
print('Applying EPGAD_Init on Qti')
gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.
values())]
hmacKey = ''
for i in gQti:
hmacKey += i
Gi = sha256(hmacKey.encode()).hexdigest()
countObj = getLast(Gi)
GiObj = pickle.loads(countObj.GiObj)
    if GiObj['cQueue'].empty():
        if 'count' not in GiObj.keys():
            GiObj['count'] = 0
            count = 0
        else:
            GiObj['count'] += 1
            count = GiObj['count']
        countObj.GiObj = pickle.dumps(GiObj)
        saveCount(countObj)
    else:
        # assumed semantics: reuse a counter released back onto the queue;
        # the original left `count` unbound (NameError) on this branch
        count = GiObj['cQueue'].get()
prime = 179426549
if count >= prime:
raise Exception('Prime Exceeded')
else:
res = count ** 2 % prime
if count <= prime / 2:
GUi = res
else:
GUi = prime - res
Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256
).hexdigest()
return Hi, GUi
def EPGAD(ReportPath, Hi=None, GUi=None):
    if Hi is None:
Hi = sha256(str(random()).encode()).hexdigest()
jsn = open(ReportPath, 'rt').read()
jsnld = loads(jsn)
print('Report Loaded')
print('Finding Subject Information')
if 'subject' in jsnld.keys():
print('Subject Information Found')
if 'display' in jsnld['subject'].keys():
jsnld['subject']['display'] = ''
print('Subject Display Found and Suppressed')
if 'reference' in jsnld['subject'].keys():
jsnld['subject']['reference'] = Hi
print('Replacing Identifier with ', Hi)
print('Placing Record Asset on BlockChain')
print()
txid = putonBlockChain(jsnld, Hi, GUi)
print('Status OK. Retrieving Transaction')
findRecord(txid)
if __name__ == '__main__':
Hi, GUi = EPGAinit('sampleIdentity.xml')
EPGAD('sampleReport.json', Hi, GUi)
<|reserved_special_token_1|>
# This implementation of EPG takes data as XML and produces corresponding pseudonymized data
from lxml import etree
from utils import generalize_or_supress
from hashlib import sha256
from count import getLast, saveCount
import pickle
from hmac import new
from random import random
from json import loads
from bigchain import putonBlockChain, findRecord
def EPGAinit(IDPath):
idt = open(IDPath,'rt').read()
Qti = etree.fromstring(idt)
print('Loading Identifiers')
print('Quasi Specifiers..')
print(', '.join(Qti.keys()))
print('Applying EPGAD_Init on Qti')
gQti = [generalize_or_supress(i[1],i[0]) for i in zip(Qti.keys(),Qti.values())]
hmacKey = ""
for i in gQti:
hmacKey+=i
Gi = sha256(hmacKey.encode()).hexdigest()
countObj = getLast(Gi)
GiObj = pickle.loads(countObj.GiObj)
	if GiObj['cQueue'].empty():
		if 'count' not in GiObj.keys():
			GiObj['count'] = 0
			count = 0
		else:
			GiObj['count']+=1
			count = GiObj['count']
		countObj.GiObj = pickle.dumps(GiObj)
		saveCount(countObj)
	else:
		# assumed semantics: reuse a counter released back onto the queue;
		# the original left count unbound (NameError) on this branch
		count = GiObj['cQueue'].get()
prime = 179426549
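	# scramble the sequential counter: square it modulo a fixed prime and fold
	# the residue, so consecutive counts do not map to consecutive GUi values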
if count >= prime:
raise Exception('Prime Exceeded')
else:
res = count**2%prime
if count <= prime/2:
GUi = res
else:
GUi = prime - res
Hi = new(Gi.encode() + str(GUi).encode() , hmacKey.encode() , sha256).hexdigest()
return Hi, GUi
def EPGAD(ReportPath, Hi=None, GUi=None):
	if Hi is None:
Hi = sha256(str(random()).encode()).hexdigest()
jsn = open(ReportPath, 'rt').read()
jsnld = loads(jsn)
print('Report Loaded')
print('Finding Subject Information')
if 'subject' in jsnld.keys():
print('Subject Information Found')
if 'display' in jsnld['subject'].keys():
jsnld['subject']['display'] = ""
print('Subject Display Found and Suppressed')
if 'reference' in jsnld['subject'].keys():
jsnld['subject']['reference'] = Hi
print('Replacing Identifier with ', Hi)
print('Placing Record Asset on BlockChain')
print()
txid = putonBlockChain(jsnld,Hi, GUi)
print('Status OK. Retrieving Transaction')
findRecord(txid)
if __name__ == "__main__":
Hi, GUi = EPGAinit('sampleIdentity.xml')
EPGAD('sampleReport.json', Hi, GUi)
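
# Hypothetical sketch of the generalize_or_supress helper imported from utils;
# its real implementation is not shown here. The idea it stands in for:
# quasi-identifiers are coarsened (generalized) and direct identifiers are
# dropped (suppressed) before the concatenated result is hashed into Gi above.
# The key names below ('age', 'zipcode', 'name') are illustrative assumptions.
def _generalize_or_supress_sketch(value, key):
	if key == 'age':                  # exact age -> decade band
		decade = int(value) // 10 * 10
		return '{}-{}'.format(decade, decade + 9)
	if key == 'zipcode':              # full code -> 3-digit prefix
		return value[:3] + '**'
	if key == 'name':                 # direct identifier -> suppressed
		return ''
	return value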
|
flexible
|
{
"blob_id": "8f554166c28fe4c9a093568a97d39b6ba515241b",
"index": 3196,
"step-1": "<mask token>\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef EPGAinit(IDPath):\n idt = open(IDPath, 'rt').read()\n Qti = etree.fromstring(idt)\n print('Loading Identifiers')\n print('Quasi Specifiers..')\n print(', '.join(Qti.keys()))\n print('Applying EPGAD_Init on Qti')\n gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.\n values())]\n hmacKey = ''\n for i in gQti:\n hmacKey += i\n Gi = sha256(hmacKey.encode()).hexdigest()\n countObj = getLast(Gi)\n GiObj = pickle.loads(countObj.GiObj)\n if GiObj['cQueue'].empty():\n if 'count' not in GiObj.keys():\n GiObj['count'] = 0\n count = 0\n else:\n GiObj['count'] += 1\n count = GiObj['count']\n countObj.GiObj = pickle.dumps(GiObj)\n saveCount(countObj)\n prime = 179426549\n if count >= prime:\n raise Exception('Prime Exceeded')\n else:\n res = count ** 2 % prime\n if count <= prime / 2:\n GUi = res\n else:\n GUi = prime - res\n Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256\n ).hexdigest()\n return Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef EPGAinit(IDPath):\n idt = open(IDPath, 'rt').read()\n Qti = etree.fromstring(idt)\n print('Loading Identifiers')\n print('Quasi Specifiers..')\n print(', '.join(Qti.keys()))\n print('Applying EPGAD_Init on Qti')\n gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.\n values())]\n hmacKey = ''\n for i in gQti:\n hmacKey += i\n Gi = sha256(hmacKey.encode()).hexdigest()\n countObj = getLast(Gi)\n GiObj = pickle.loads(countObj.GiObj)\n if GiObj['cQueue'].empty():\n if 'count' not in GiObj.keys():\n GiObj['count'] = 0\n count = 0\n else:\n GiObj['count'] += 1\n count = GiObj['count']\n countObj.GiObj = pickle.dumps(GiObj)\n saveCount(countObj)\n prime = 179426549\n if count >= prime:\n raise Exception('Prime Exceeded')\n else:\n res = count ** 2 % prime\n if count <= prime / 2:\n GUi = res\n else:\n GUi = prime - res\n Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256\n ).hexdigest()\n return Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\nif __name__ == '__main__':\n Hi, GUi = EPGAinit('sampleIdentity.xml')\n EPGAD('sampleReport.json', Hi, GUi)\n",
"step-4": "from lxml import etree\nfrom utils import generalize_or_supress\nfrom hashlib import sha256\nfrom count import getLast, saveCount\nimport pickle\nfrom hmac import new\nfrom random import random\nfrom json import loads\nfrom bigchain import putonBlockChain, findRecord\n\n\ndef EPGAinit(IDPath):\n idt = open(IDPath, 'rt').read()\n Qti = etree.fromstring(idt)\n print('Loading Identifiers')\n print('Quasi Specifiers..')\n print(', '.join(Qti.keys()))\n print('Applying EPGAD_Init on Qti')\n gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.\n values())]\n hmacKey = ''\n for i in gQti:\n hmacKey += i\n Gi = sha256(hmacKey.encode()).hexdigest()\n countObj = getLast(Gi)\n GiObj = pickle.loads(countObj.GiObj)\n if GiObj['cQueue'].empty():\n if 'count' not in GiObj.keys():\n GiObj['count'] = 0\n count = 0\n else:\n GiObj['count'] += 1\n count = GiObj['count']\n countObj.GiObj = pickle.dumps(GiObj)\n saveCount(countObj)\n prime = 179426549\n if count >= prime:\n raise Exception('Prime Exceeded')\n else:\n res = count ** 2 % prime\n if count <= prime / 2:\n GUi = res\n else:\n GUi = prime - res\n Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256\n ).hexdigest()\n return Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\nif __name__ == '__main__':\n Hi, GUi = EPGAinit('sampleIdentity.xml')\n EPGAD('sampleReport.json', Hi, GUi)\n",
"step-5": "# This implementation of EPG takes data as XML and produces corresponding pseudonymized data\n\nfrom lxml import etree\nfrom utils import generalize_or_supress\nfrom hashlib import sha256\nfrom count import getLast, saveCount\nimport pickle\nfrom hmac import new\nfrom random import random\nfrom json import loads\nfrom bigchain import putonBlockChain, findRecord\n\ndef EPGAinit(IDPath):\n\tidt = open(IDPath,'rt').read()\n\n\tQti = etree.fromstring(idt)\n\n\tprint('Loading Identifiers')\n\tprint('Quasi Specifiers..')\n\tprint(', '.join(Qti.keys()))\n\tprint('Applying EPGAD_Init on Qti')\n\t\n\tgQti = [generalize_or_supress(i[1],i[0]) for i in zip(Qti.keys(),Qti.values())]\n\n\thmacKey = \"\"\n\n\tfor i in gQti:\n\t\thmacKey+=i\n\n\tGi = sha256(hmacKey.encode()).hexdigest()\n\n\tcountObj = getLast(Gi)\n\tGiObj = pickle.loads(countObj.GiObj)\n\n\tif GiObj['cQueue'].empty():\n\t\tif 'count' not in GiObj.keys():\n\t\t\tGiObj['count'] = 0\n\t\t\tcount = 0\n\t\telse:\n\t\t\tGiObj['count']+=1\n\t\t\tcount = GiObj['count']\n\t\tcountObj.GiObj = pickle.dumps(GiObj)\n\t\tsaveCount(countObj)\n\n\tprime = 179426549\n\n\tif count >= prime:\n\t\t raise Exception('Prime Exceeded')\n\n\telse:\n\t\tres = count**2%prime\n\t\tif count <= prime/2:\n\t\t\tGUi = res\n\t\telse:\n\t\t\tGUi = prime - res\n\n\tHi = new(Gi.encode() + str(GUi).encode() , hmacKey.encode() , sha256).hexdigest()\n\treturn Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi = None):\n\tif Hi == None:\n\t\tHi = sha256(str(random()).encode()).hexdigest()\n\tjsn = open(ReportPath, 'rt').read()\n\tjsnld = loads(jsn)\n\tprint('Report Loaded')\n\tprint('Finding Subject Information')\n\tif 'subject' in jsnld.keys():\n\t\tprint('Subject Information Found')\n\t\tif 'display' in jsnld['subject'].keys():\n\t\t\tjsnld['subject']['display'] = \"\"\n\t\t\tprint('Subject Display Found and Suppressed')\n\t\tif 'reference' in jsnld['subject'].keys():\n\t\t\tjsnld['subject']['reference'] = Hi\n\t\t\tprint('Replacing Identifier with ', Hi)\n\n\tprint('Placing Record Asset on BlockChain')\n\tprint()\n\ttxid = putonBlockChain(jsnld,Hi, GUi)\n\tprint('Status OK. Retrieving Transaction')\n\tfindRecord(txid)\n\nif __name__ == \"__main__\":\n\tHi, GUi = EPGAinit('sampleIdentity.xml')\n\tEPGAD('sampleReport.json', Hi, GUi)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(was)
<|reserved_special_token_1|>
was = input()
print(was)
|
flexible
|
{
"blob_id": "e12c411814efd7cc7417174b51f0f756589ca40b",
"index": 3325,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(was)\n",
"step-3": "was = input()\nprint(was)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
-------------------------------------------------------
Stack utilities
-------------------------------------------------------
Author: Evan Attfield
ID: 180817010
Email: [email protected]
__updated__ = "Jan 22, 2019"
-------------------------------------------------------
"""
from Stack_array import Stack
from Queue_array import Queue
from Priority_Queue_array import Priority_Queue
from List_array import List
def array_to_stack(stack, source):
"""
-------------------------------------------------------
Pushes contents of source onto stack. At finish, source is empty.
Last value in source is at bottom of stack,
first value in source is on top of stack.
Use: array_to_stack(stack, source)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source != []:
temp = source.pop()
stack.push(temp)
return
def stack_to_array(stack, target):
"""
-------------------------------------------------------
Pops contents of stack into target. At finish, stack is empty.
Top value of stack is at end of target,
bottom value of stack is at beginning of target.
Use: stack_to_array(stack, target)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while stack.is_empty() == False:
temp = stack.pop()
target.insert(0, temp) #adds temp to the beginning, while append adds temp to the end
return
def stack_test(source):
"""
-------------------------------------------------------
Tests the methods of Stack for empty and
non-empty stacks using the data in source:
is_empty, push, pop, peek
(Testing pop and peek while empty throws exceptions)
Use: stack_test(source)
-------------------------------------------------------
Parameters:
source - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
stack = Stack()
dummy = []
if stack.is_empty() == True:
print('Stack is empty.')
    print('Converting source into a stack...')
    array_to_stack(stack, source)
if stack.is_empty() == False:
print('source has been transferred into stack!')
print('\nPopping stack...')
while stack.is_empty() == False:
temp = stack.pop()
print(temp)
dummy.append(temp)
print('\nstack is empty. Pushing values back into stack...')
while dummy != []:
temp = dummy.pop()
print(temp)
stack.push(temp)
print('\nPushing complete! Peeking...')
print(stack.peek())
return
def array_to_queue(queue, source):
"""
-------------------------------------------------------
Inserts contents of source into queue. At finish, source is empty.
Last value in source is at rear of queue,
first value in source is at front of queue.
Use: array_to_queue(queue, source)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
queue.insert(temp)
return
def queue_to_array(queue, target):
"""
-------------------------------------------------------
Removes contents of queue into target. At finish, queue is empty.
Front value of queue is at front of target,
rear value of queue is at end of target.
Use: queue_to_array(queue, target)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while queue.is_empty() == False:
temp = queue.remove()
target.append(temp)
return
def array_to_pq(pq, source):
"""
-------------------------------------------------------
Inserts contents of source into pq. At finish, source is empty.
Last value in source is at rear of pq,
first value in source is at front of pq.
Use: array_to_pq(pq, source)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
pq.insert(temp)
return
def pq_to_array(pq, target):
"""
-------------------------------------------------------
Removes contents of pq into target. At finish, pq is empty.
Highest priority value in pq is at front of target,
lowest priority value in pq is at end of target.
Use: pq_to_array(pq, target)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while pq.is_empty() == False:
temp = pq.remove()
target.append(temp)
return
def queue_test(a):
"""
-------------------------------------------------------
Tests queue implementation.
Use: queue_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Queue are tested for both empty and
non-empty queues using the data in a:
is_empty, insert, remove, peek, len
-------------------------------------------------------
"""
queue = Queue()
dummy = []
if queue.is_empty() == True:
print('Queue is empty.')
    print('Converting a into a queue...')
    array_to_queue(queue, a)
if queue.is_empty() == False:
print('a has been transferred into queue!')
    print('\nRemoving from queue...')
while queue.is_empty() == False:
temp = queue.remove()
print(temp)
dummy.append(temp)
print('\nqueue is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
queue.insert(temp)
    print('\nInserting complete! Peeking...')
print(queue.peek())
print('\nqueue is {} objects long!'.format(len(queue)))
return
def priority_queue_test(a):
"""
-------------------------------------------------------
Tests priority queue implementation.
Use: pq_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Priority_Queue are tested for both empty and
non-empty priority queues using the data in a:
is_empty, insert, remove, peek
-------------------------------------------------------
"""
pq = Priority_Queue()
dummy = []
if pq.is_empty() == True:
print('pq is empty.')
    print('Converting a into a pq...')
    array_to_pq(pq, a)
if pq.is_empty() == False:
print('a has been transferred into pq!')
    print('\nRemoving from pq...')
while pq.is_empty() == False:
temp = pq.remove()
print(temp)
dummy.append(temp)
    print('\npq is empty. Inserting values back into pq...')
while dummy != []:
temp = dummy.pop()
print(temp)
pq.insert(temp)
    print('\nInserting complete! Peeking...')
print(pq.peek())
print('\npq is {} objects long!'.format(len(pq)))
return
def array_to_list(llist, source):
"""
-------------------------------------------------------
Appends contests of source to llist. At finish, source is empty.
Last element in source is at rear of llist,
first element in source is at front of llist.
Use: array_to_list(llist, source)
-------------------------------------------------------
Parameters:
llist - a List object (List)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source: #a list is considered True as long as it is not empty
llist.append(source.pop(0))
return
def list_to_array(llist, target):
"""
-------------------------------------------------------
Removes contents of llist into target. At finish, llist is empty.
Front element of llist is at front of target,
rear element of llist is at rear of target.
Use: list_to_array(llist, target)
-------------------------------------------------------
Parameters:
llist - a List object (List)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while llist.is_empty() == False:
target.append(llist.pop(0))
return
def list_test(a):
"""
-------------------------------------------------------
Tests list implementation.
The methods of List are tested for both empty and
non-empty lists using the data in a:
is_empty, insert, remove, append, index, __contains__,
find, count, max, min, __getitem__, __setitem__
Use: list_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
lst = List()
if lst.is_empty() == True:
print('lst is empty.')
    print('Converting a into a lst...')
    array_to_list(lst, a)
if lst.is_empty() == False:
print('a has been transferred into lst!')
print('The movie at index 0 is {}'.format(lst[0]))
    print('\nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('Now the movie at index 0 is {}'.format(lst[0]))
    print('\nInserting the movie at index 1...')
lst.insert(1, temp)
print('Now the movie at index 1 is {}'.format(lst[1]))
    print('\nRemoving the movie at index 0...')
    temp = lst.remove(lst[0])
    print('\nAppending the movie...')
lst.append(temp)
print('Peeking...')
print(lst.peek())
    print('\nThe index of the movie is {}'.format(lst.index(temp)))
    print('\n{} appears {} time(s)'.format(temp, lst.count(temp)))
    print('\nThe max is {}'.format(lst.max()))
    print('The min is {}'.format(lst.min()))
    print('\nThe movie is at index {}'.format(lst.find(temp)))
return
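
# Minimal demo sketch (an addition, not part of the original module): a round
# trip through the stack conversion helpers. Per the docstrings above,
# array_to_stack followed by stack_to_array reverses the order of the data.
if __name__ == "__main__":
    data = ["Alien", "Blade Runner", "Casablanca"]
    s = Stack()
    array_to_stack(s, data)         # data is drained; "Alien" ends up on top
    restored = []
    stack_to_array(s, restored)     # top of stack lands at the END of restored
    print(restored)                 # ['Casablanca', 'Blade Runner', 'Alien']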
|
normal
|
{
"blob_id": "dab9b58b08b562d902ee0ae1104198cb1ebbffe5",
"index": 1928,
"step-1": "<mask token>\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\n<mask token>\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\n<mask token>\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while stack.is_empty() == False:\n temp = stack.pop()\n target.insert(0, temp)\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and \n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n stack = Stack()\n dummy = []\n if stack.is_empty() == True:\n print('Stack is empty.')\n array_to_stack(stack, source)\n print('Converting source into a stack...')\n if stack.is_empty() == False:\n print('source has been transferred into stack!')\n print('\\nPopping stack...')\n while stack.is_empty() == False:\n temp = stack.pop()\n print(temp)\n dummy.append(temp)\n print('\\nstack is empty. Pushing values back into stack...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n stack.push(temp)\n print('\\nPushing complete! Peeking...')\n print(stack.peek())\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. At finish, source is empty.\n Last value in source is at rear of queue, \n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n queue.insert(temp)\n return\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. 
At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\n<mask token>\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Use: pq_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Priority_Queue are tested for both empty and \n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n dummy = []\n if pq.is_empty() == True:\n print('pq is empty.')\n array_to_pq(pq, a)\n print('Converting a into a pq...')\n if pq.is_empty() == False:\n print('a has been transferred into pq!')\n print('\\nRemoving pq...')\n while pq.is_empty() == False:\n temp = pq.remove()\n print(temp)\n dummy.append(temp)\n print('\\\\pq is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n pq.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(pq.peek())\n print('\\npq is {} objects long!'.format(len(pq)))\n return\n\n\n<mask token>\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. 
At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\ndef list_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests list implementation.\n The methods of List are tested for both empty and \n non-empty lists using the data in a:\n is_empty, insert, remove, append, index, __contains__,\n find, count, max, min, __getitem__, __setitem__\n Use: list_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n if lst.is_empty() == True:\n print('lst is empty.')\n array_to_list(lst, a)\n print('Converting a into a lst...')\n if lst.is_empty() == False:\n print('a has been transferred into lst!')\n print('The movie at index 0 is {}'.format(lst[0]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('Now the movie at index 0 is {}'.format(lst[0]))\n print('/nInserting the movie at index 1...')\n lst.insert(1, temp)\n print('Now the movie at index 1 is {}'.format(lst[1]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('/nAppending the movie...')\n lst.append(temp)\n print('Peeking...')\n print(lst.peek())\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\n print('/nThe max is {}'.format(lst.max()))\n print('The min is {}'.format(lst.min()))\n print('/nThe movie is at index {}'.format(lst.find(temp)))\n return\n",
"step-3": "<mask token>\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while stack.is_empty() == False:\n temp = stack.pop()\n target.insert(0, temp)\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and \n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n stack = Stack()\n dummy = []\n if stack.is_empty() == True:\n print('Stack is empty.')\n array_to_stack(stack, source)\n print('Converting source into a stack...')\n if stack.is_empty() == False:\n print('source has been transferred into stack!')\n print('\\nPopping stack...')\n while stack.is_empty() == False:\n temp = stack.pop()\n print(temp)\n dummy.append(temp)\n print('\\nstack is empty. Pushing values back into stack...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n stack.push(temp)\n print('\\nPushing complete! Peeking...')\n print(stack.peek())\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. At finish, source is empty.\n Last value in source is at rear of queue, \n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n queue.insert(temp)\n return\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. 
At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\ndef pq_to_array(pq, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of pq into target. At finish, pq is empty.\n Highest priority value in pq is at front of target,\n lowest priority value in pq is at end of target.\n Use: pq_to_array(pq, target)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while pq.is_empty() == False:\n temp = pq.remove()\n target.append(temp)\n return\n\n\ndef queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests queue implementation.\n Use: queue_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Queue are tested for both empty and \n non-empty queues using the data in a:\n is_empty, insert, remove, peek, len\n -------------------------------------------------------\n \"\"\"\n queue = Queue()\n dummy = []\n if queue.is_empty() == True:\n print('Queue is empty.')\n array_to_queue(queue, a)\n print('Converting a into a queue...')\n if queue.is_empty() == False:\n print('a has been transferred into queue!')\n print('\\nRemoving queue...')\n while queue.is_empty() == False:\n temp = queue.remove()\n print(temp)\n dummy.append(temp)\n print('\\nqueue is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n queue.insert(temp)\n print('\\nPushing complete! 
Peeking...')\n print(queue.peek())\n print('\\nqueue is {} objects long!'.format(len(queue)))\n return\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Use: pq_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Priority_Queue are tested for both empty and \n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n dummy = []\n if pq.is_empty() == True:\n print('pq is empty.')\n array_to_pq(pq, a)\n print('Converting a into a pq...')\n if pq.is_empty() == False:\n print('a has been transferred into pq!')\n print('\\nRemoving pq...')\n while pq.is_empty() == False:\n temp = pq.remove()\n print(temp)\n dummy.append(temp)\n print('\\\\pq is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n pq.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(pq.peek())\n print('\\npq is {} objects long!'.format(len(pq)))\n return\n\n\ndef array_to_list(llist, source):\n \"\"\"\n -------------------------------------------------------\n Appends contests of source to llist. At finish, source is empty.\n Last element in source is at rear of llist, \n first element in source is at front of llist.\n Use: array_to_list(llist, source)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source:\n llist.append(source.pop(0))\n return\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. 
At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\ndef list_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests list implementation.\n The methods of List are tested for both empty and \n non-empty lists using the data in a:\n is_empty, insert, remove, append, index, __contains__,\n find, count, max, min, __getitem__, __setitem__\n Use: list_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n if lst.is_empty() == True:\n print('lst is empty.')\n array_to_list(lst, a)\n print('Converting a into a lst...')\n if lst.is_empty() == False:\n print('a has been transferred into lst!')\n print('The movie at index 0 is {}'.format(lst[0]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('Now the movie at index 0 is {}'.format(lst[0]))\n print('/nInserting the movie at index 1...')\n lst.insert(1, temp)\n print('Now the movie at index 1 is {}'.format(lst[1]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('/nAppending the movie...')\n lst.append(temp)\n print('Peeking...')\n print(lst.peek())\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\n print('/nThe max is {}'.format(lst.max()))\n print('The min is {}'.format(lst.min()))\n print('/nThe movie is at index {}'.format(lst.find(temp)))\n return\n",
"step-4": "<mask token>\nfrom Stack_array import Stack\nfrom Queue_array import Queue\nfrom Priority_Queue_array import Priority_Queue\nfrom List_array import List\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while stack.is_empty() == False:\n temp = stack.pop()\n target.insert(0, temp)\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and \n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n stack = Stack()\n dummy = []\n if stack.is_empty() == True:\n print('Stack is empty.')\n array_to_stack(stack, source)\n print('Converting source into a stack...')\n if stack.is_empty() == False:\n print('source has been transferred into stack!')\n print('\\nPopping stack...')\n while stack.is_empty() == False:\n temp = stack.pop()\n print(temp)\n dummy.append(temp)\n print('\\nstack is empty. Pushing values back into stack...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n stack.push(temp)\n print('\\nPushing complete! Peeking...')\n print(stack.peek())\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. At finish, source is empty.\n Last value in source is at rear of queue, \n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n queue.insert(temp)\n return\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. 
At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\ndef pq_to_array(pq, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of pq into target. At finish, pq is empty.\n Highest priority value in pq is at front of target,\n lowest priority value in pq is at end of target.\n Use: pq_to_array(pq, target)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while pq.is_empty() == False:\n temp = pq.remove()\n target.append(temp)\n return\n\n\ndef queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests queue implementation.\n Use: queue_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Queue are tested for both empty and \n non-empty queues using the data in a:\n is_empty, insert, remove, peek, len\n -------------------------------------------------------\n \"\"\"\n queue = Queue()\n dummy = []\n if queue.is_empty() == True:\n print('Queue is empty.')\n array_to_queue(queue, a)\n print('Converting a into a queue...')\n if queue.is_empty() == False:\n print('a has been transferred into queue!')\n print('\\nRemoving queue...')\n while queue.is_empty() == False:\n temp = queue.remove()\n print(temp)\n dummy.append(temp)\n print('\\nqueue is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n queue.insert(temp)\n print('\\nPushing complete! 
Peeking...')\n print(queue.peek())\n print('\\nqueue is {} objects long!'.format(len(queue)))\n return\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Use: pq_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Priority_Queue are tested for both empty and \n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n dummy = []\n if pq.is_empty() == True:\n print('pq is empty.')\n array_to_pq(pq, a)\n print('Converting a into a pq...')\n if pq.is_empty() == False:\n print('a has been transferred into pq!')\n print('\\nRemoving pq...')\n while pq.is_empty() == False:\n temp = pq.remove()\n print(temp)\n dummy.append(temp)\n print('\\\\pq is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n pq.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(pq.peek())\n print('\\npq is {} objects long!'.format(len(pq)))\n return\n\n\ndef array_to_list(llist, source):\n \"\"\"\n -------------------------------------------------------\n Appends contests of source to llist. At finish, source is empty.\n Last element in source is at rear of llist, \n first element in source is at front of llist.\n Use: array_to_list(llist, source)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source:\n llist.append(source.pop(0))\n return\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. 
At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\ndef list_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests list implementation.\n The methods of List are tested for both empty and \n non-empty lists using the data in a:\n is_empty, insert, remove, append, index, __contains__,\n find, count, max, min, __getitem__, __setitem__\n Use: list_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n if lst.is_empty() == True:\n print('lst is empty.')\n array_to_list(lst, a)\n print('Converting a into a lst...')\n if lst.is_empty() == False:\n print('a has been transferred into lst!')\n print('The movie at index 0 is {}'.format(lst[0]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('Now the movie at index 0 is {}'.format(lst[0]))\n print('/nInserting the movie at index 1...')\n lst.insert(1, temp)\n print('Now the movie at index 1 is {}'.format(lst[1]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('/nAppending the movie...')\n lst.append(temp)\n print('Peeking...')\n print(lst.peek())\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\n print('/nThe max is {}'.format(lst.max()))\n print('The min is {}'.format(lst.min()))\n print('/nThe movie is at index {}'.format(lst.find(temp)))\n return\n",
"step-5": "\"\"\"\r\n-------------------------------------------------------\r\nStack utilities\r\n-------------------------------------------------------\r\nAuthor: Evan Attfield\r\nID: 180817010\r\nEmail: [email protected]\r\n__updated__ = \"Jan 22, 2019\"\r\n-------------------------------------------------------\r\n\"\"\"\r\nfrom Stack_array import Stack\r\nfrom Queue_array import Queue\r\nfrom Priority_Queue_array import Priority_Queue\r\nfrom List_array import List\r\n\r\ndef array_to_stack(stack, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Pushes contents of source onto stack. At finish, source is empty.\r\n Last value in source is at bottom of stack, \r\n first value in source is on top of stack.\r\n Use: array_to_stack(stack, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n stack - a Stack object (Stack)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n \r\n while source != []:\r\n temp = source.pop()\r\n stack.push(temp)\r\n \r\n return \r\n\r\ndef stack_to_array(stack, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Pops contents of stack into target. At finish, stack is empty.\r\n Top value of stack is at end of target,\r\n bottom value of stack is at beginning of target.\r\n Use: stack_to_array(stack, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n stack - a Stack object (Stack)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n \r\n while stack.is_empty() == False:\r\n temp = stack.pop()\r\n target.insert(0, temp) #adds temp to the beginning, while append adds temp to the end\r\n return \r\n \r\ndef stack_test(source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests the methods of Stack for empty and \r\n non-empty stacks using the data in source:\r\n is_empty, push, pop, peek\r\n (Testing pop and peek while empty throws exceptions)\r\n Use: stack_test(source)\r\n -------------------------------------------------------\r\n Parameters:\r\n source - list of data (list of ?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n stack = Stack()\r\n dummy = []\r\n if stack.is_empty() == True:\r\n print('Stack is empty.')\r\n \r\n array_to_stack(stack, source)\r\n print('Converting source into a stack...')\r\n \r\n if stack.is_empty() == False:\r\n print('source has been transferred into stack!')\r\n \r\n print('\\nPopping stack...')\r\n while stack.is_empty() == False:\r\n temp = stack.pop()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\nstack is empty. Pushing values back into stack...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n stack.push(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(stack.peek())\r\n \r\n return\r\n \r\ndef array_to_queue(queue, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Inserts contents of source into queue. 
At finish, source is empty.\r\n Last value in source is at rear of queue, \r\n first value in source is at front of queue.\r\n Use: array_to_queue(queue, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n queue - a Queue object (Queue)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while source != []:\r\n temp = source.pop(0)\r\n queue.insert(temp)\r\n\r\n return\r\ndef queue_to_array(queue, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes contents of queue into target. At finish, queue is empty.\r\n Front value of queue is at front of target,\r\n rear value of queue is at end of target.\r\n Use: queue_to_array(queue, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n queue - a Queue object (Queue)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while queue.is_empty() == False:\r\n temp = queue.remove()\r\n target.append(temp)\r\n\r\n return\r\n\r\ndef array_to_pq(pq, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Inserts contents of source into pq. At finish, source is empty.\r\n Last value in source is at rear of pq, \r\n first value in source is at front of pq.\r\n Use: array_to_pq(pq, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n pq - a Priority_Queue object (Priority_Queue)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while source != []:\r\n temp = source.pop(0)\r\n pq.insert(temp)\r\n\r\n return\r\n\r\ndef pq_to_array(pq, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes contents of pq into target. At finish, pq is empty.\r\n Highest priority value in pq is at front of target,\r\n lowest priority value in pq is at end of target.\r\n Use: pq_to_array(pq, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n pq - a Priority_Queue object (Priority_Queue)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while pq.is_empty() == False:\r\n temp = pq.remove()\r\n target.append(temp)\r\n \r\n return\r\n\r\ndef queue_test(a):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests queue implementation.\r\n Use: queue_test(a)\r\n -------------------------------------------------------\r\n Parameters:\r\n a - list of data (list of ?)\r\n Returns:\r\n the methods of Queue are tested for both empty and \r\n non-empty queues using the data in a:\r\n is_empty, insert, remove, peek, len\r\n -------------------------------------------------------\r\n \"\"\"\r\n queue = Queue()\r\n dummy = []\r\n if queue.is_empty() == True:\r\n print('Queue is empty.')\r\n \r\n array_to_queue(queue, a)\r\n print('Converting a into a queue...')\r\n \r\n if queue.is_empty() == False:\r\n print('a has been transferred into queue!')\r\n \r\n print('\\nRemoving queue...')\r\n while queue.is_empty() == False:\r\n temp = queue.remove()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\nqueue is empty. 
Inserting values back into queue...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n queue.insert(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(queue.peek())\r\n \r\n print('\\nqueue is {} objects long!'.format(len(queue)))\r\n\r\n return\r\n\r\ndef priority_queue_test(a):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests priority queue implementation.\r\n Use: pq_test(a)\r\n -------------------------------------------------------\r\n Parameters:\r\n a - list of data (list of ?)\r\n Returns:\r\n the methods of Priority_Queue are tested for both empty and \r\n non-empty priority queues using the data in a:\r\n is_empty, insert, remove, peek\r\n -------------------------------------------------------\r\n \"\"\"\r\n pq = Priority_Queue()\r\n dummy = []\r\n if pq.is_empty() == True:\r\n print('pq is empty.')\r\n \r\n array_to_pq(pq, a)\r\n print('Converting a into a pq...')\r\n \r\n if pq.is_empty() == False:\r\n print('a has been transferred into pq!')\r\n \r\n print('\\nRemoving pq...')\r\n while pq.is_empty() == False:\r\n temp = pq.remove()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\pq is empty. Inserting values back into queue...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n pq.insert(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(pq.peek())\r\n \r\n print('\\npq is {} objects long!'.format(len(pq)))\r\n\r\n return\r\n\r\ndef array_to_list(llist, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Appends contests of source to llist. At finish, source is empty.\r\n Last element in source is at rear of llist, \r\n first element in source is at front of llist.\r\n Use: array_to_list(llist, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n llist - a List object (List)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n while source: #a list is considered True as long as it is not empty\r\n llist.append(source.pop(0))\r\n \r\n return\r\n\r\ndef list_to_array(llist, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes contents of llist into target. 
At finish, llist is empty.\r\n Front element of llist is at front of target,\r\n rear element of llist is at rear of target.\r\n Use: list_to_array(llist, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n llist - a List object (List)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n while llist.is_empty() == False:\r\n target.append(llist.pop(0))\r\n \r\n return\r\n\r\ndef list_test(a):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests list implementation.\r\n The methods of List are tested for both empty and \r\n non-empty lists using the data in a:\r\n is_empty, insert, remove, append, index, __contains__,\r\n find, count, max, min, __getitem__, __setitem__\r\n Use: list_test(a)\r\n -------------------------------------------------------\r\n Parameters:\r\n a - list of data (list of ?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n lst = List()\r\n \r\n if lst.is_empty() == True:\r\n print('lst is empty.')\r\n \r\n array_to_list(lst, a)\r\n print('Converting a into a lst...')\r\n \r\n if lst.is_empty() == False:\r\n print('a has been transferred into lst!')\r\n \r\n print('The movie at index 0 is {}'.format(lst[0]))\r\n \r\n print('/nRemoving the movie at index 0...')\r\n temp = lst.remove(lst[0])\r\n print('Now the movie at index 0 is {}'.format(lst[0]))\r\n \r\n print('/nInserting the movie at index 1...')\r\n lst.insert(1, temp)\r\n print('Now the movie at index 1 is {}'.format(lst[1]))\r\n \r\n print('/nRemoving the movie at index 0...')\r\n temp = lst.remove(lst[0])\r\n \r\n print('/nAppending the movie...')\r\n lst.append(temp)\r\n \r\n print('Peeking...')\r\n print(lst.peek())\r\n \r\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\r\n \r\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\r\n \r\n print('/nThe max is {}'. format(lst.max()))\r\n print('The min is {}'. format(lst.min()))\r\n \r\n print('/nThe movie is at index {}'.format(lst.find(temp)))\r\n \r\n \r\n\r\n return\r\n",
"step-ids": [
4,
9,
12,
13,
14
]
}
|
[
4,
9,
12,
13,
14
] |
from web3.auto.infura import w3
import json
import os  # note: unused in this script

# Load the contract ABI exported alongside this script.
with open("contract_abi.json") as f:
    info_json = json.load(f)
abi = info_json

mycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)

# Filter for currentResponderState events from the given block onwards.
myfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)
#myfilter.fromBlock = "16181508"
#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})
print(abi)
print(myfilter)
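# ---------------------------------------------------------------------------
# The script stops at printing the filter object. For illustration only (not
# part of the source), this is how such a web3.py log filter is typically
# consumed via its get_all_entries / get_new_entries methods:
import time

# One-shot: fetch every matching event logged since fromBlock.
for event in myfilter.get_all_entries():
    print(event['blockNumber'], event['args'])

# Or poll for events that arrive after the filter was created.
while True:
    for event in myfilter.get_new_entries():
        print(event['args'])
    time.sleep(2)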
|
normal
|
{
"blob_id": "8921c0a17e90f7113d1e0be630a15fc9d74d1780",
"index": 8519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\n<mask token>\nprint(abi)\nprint(myfilter)\n",
"step-3": "<mask token>\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address=\n '0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=\n 16147303)\nprint(abi)\nprint(myfilter)\n",
"step-4": "from web3.auto.infura import w3\nimport json\nimport os\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address=\n '0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=\n 16147303)\nprint(abi)\nprint(myfilter)\n",
"step-5": "from web3.auto.infura import w3\nimport json\nimport os\n\nwith open(\"contract_abi.json\") as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)\n#myfilter.fromBlock = \"16181508\"\n#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})\nprint(abi)\nprint (myfilter)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def getworld_data(url, header):
headers = header
res = requests.get(url, headers=headers)
res.encoding = 'UTF-8'
pattern = re.compile(
'(\'\\{"(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\\}\')'
, re.S)
end = re.findall(pattern, res.text)
a = str(end[0])
with open('test.txt', 'w') as f:
f.write(a)
data_relative_confirmed_json = []
pattern_1 = re.compile(
'(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}'
, re.S)
end_1 = re.findall(pattern_1, a)
return end_1
<|reserved_special_token_0|>
def write_json_to_csv(data_relative_confirmed_json, end_1):
write_list_to_json(data_relative_confirmed_json,
'20200517-world-active-data.json', 'E:/python_code/world_cov19')
data_csv = pd.DataFrame(json.loads(open(
'20200517-world-active-data.json', 'r+').read()))
print(end_1[36][0])
care = end_1[36][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[36][6].replace('/', ',').replace('/', ',').replace('"', ''
).split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[36][5].replace('[', '').replace(']', '').
split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
except:
pass
date.insert(0, 'Country')
cols = date
data_csv = data_csv.loc[:, cols]
    # (the transpose is applied via the CSV round-trip below)
data_csv.to_csv('20200517-world-active-data.json.csv')
df = pd.read_csv('20200517-world-active-data.json.csv')
new_csv = df.T
new_csv.to_csv('20200517-world-active-data.json.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getworld_data(url, header):
headers = header
res = requests.get(url, headers=headers)
res.encoding = 'UTF-8'
pattern = re.compile(
'(\'\\{"(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\\}\')'
, re.S)
end = re.findall(pattern, res.text)
a = str(end[0])
with open('test.txt', 'w') as f:
f.write(a)
data_relative_confirmed_json = []
pattern_1 = re.compile(
'(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}'
, re.S)
end_1 = re.findall(pattern_1, a)
return end_1
def count_time(end_1):
data_relative_confirmed_json = []
country = []
for i in range(len(end_1)):
data = {'Country': ''}
data['Country'] = end_1[i][0]
country.append(end_1[i][0])
care = end_1[i][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[i][6].replace('/', ',').replace('/', ',').replace('"',
'').split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[i][5].replace('[', '').replace(']', ''
).split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime(
'%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
date_json = OrderedDict(data, **time_care)
data_relative_confirmed_json.append(date_json)
except:
pass
return data_relative_confirmed_json
def write_json_to_csv(data_relative_confirmed_json, end_1):
write_list_to_json(data_relative_confirmed_json,
'20200517-world-active-data.json', 'E:/python_code/world_cov19')
data_csv = pd.DataFrame(json.loads(open(
'20200517-world-active-data.json', 'r+').read()))
print(end_1[36][0])
care = end_1[36][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[36][6].replace('/', ',').replace('/', ',').replace('"', ''
).split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[36][5].replace('[', '').replace(']', '').
split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
except:
pass
date.insert(0, 'Country')
cols = date
data_csv = data_csv.loc[:, cols]
    # (the transpose is applied via the CSV round-trip below)
data_csv.to_csv('20200517-world-active-data.json.csv')
df = pd.read_csv('20200517-world-active-data.json.csv')
new_csv = df.T
new_csv.to_csv('20200517-world-active-data.json.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def write_list_to_json(list, json_file_name, json_file_save_path):
os.chdir(json_file_save_path)
with open(json_file_name, 'w') as f:
json.dump(list, f)
def getworld_data(url, header):
headers = header
res = requests.get(url, headers=headers)
res.encoding = 'UTF-8'
pattern = re.compile(
'(\'\\{"(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\\}\')'
, re.S)
end = re.findall(pattern, res.text)
a = str(end[0])
with open('test.txt', 'w') as f:
f.write(a)
data_relative_confirmed_json = []
pattern_1 = re.compile(
'(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}'
, re.S)
end_1 = re.findall(pattern_1, a)
return end_1
def count_time(end_1):
data_relative_confirmed_json = []
country = []
for i in range(len(end_1)):
data = {'Country': ''}
data['Country'] = end_1[i][0]
country.append(end_1[i][0])
care = end_1[i][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[i][6].replace('/', ',').replace('/', ',').replace('"',
'').split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[i][5].replace('[', '').replace(']', ''
).split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime(
'%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
date_json = OrderedDict(data, **time_care)
data_relative_confirmed_json.append(date_json)
except:
pass
return data_relative_confirmed_json
def write_json_to_csv(data_relative_confirmed_json, end_1):
write_list_to_json(data_relative_confirmed_json,
'20200517-world-active-data.json', 'E:/python_code/world_cov19')
data_csv = pd.DataFrame(json.loads(open(
'20200517-world-active-data.json', 'r+').read()))
print(end_1[36][0])
care = end_1[36][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[36][6].replace('/', ',').replace('/', ',').replace('"', ''
).split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[36][5].replace('[', '').replace(']', '').
split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
except:
pass
date.insert(0, 'Country')
cols = date
data_csv = data_csv.loc[:, cols]
    # (the transpose is applied via the CSV round-trip below)
data_csv.to_csv('20200517-world-active-data.json.csv')
df = pd.read_csv('20200517-world-active-data.json.csv')
new_csv = df.T
new_csv.to_csv('20200517-world-active-data.json.csv')
<|reserved_special_token_1|>
import re
import requests
import numpy as np
import json
import os
from collections import OrderedDict
import pandas as pd
import json
import datetime
import time
def write_list_to_json(list, json_file_name, json_file_save_path):
os.chdir(json_file_save_path)
with open(json_file_name, 'w') as f:
json.dump(list, f)
def getworld_data(url, header):
headers = header
res = requests.get(url, headers=headers)
res.encoding = 'UTF-8'
pattern = re.compile(
'(\'\\{"(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\\}\')'
, re.S)
end = re.findall(pattern, res.text)
a = str(end[0])
with open('test.txt', 'w') as f:
f.write(a)
data_relative_confirmed_json = []
pattern_1 = re.compile(
'(\\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}'
, re.S)
end_1 = re.findall(pattern_1, a)
return end_1
def count_time(end_1):
data_relative_confirmed_json = []
country = []
for i in range(len(end_1)):
data = {'Country': ''}
data['Country'] = end_1[i][0]
country.append(end_1[i][0])
care = end_1[i][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[i][6].replace('/', ',').replace('/', ',').replace('"',
'').split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[i][5].replace('[', '').replace(']', ''
).split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime(
'%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
date_json = OrderedDict(data, **time_care)
data_relative_confirmed_json.append(date_json)
except:
pass
return data_relative_confirmed_json
def write_json_to_csv(data_relative_confirmed_json, end_1):
write_list_to_json(data_relative_confirmed_json,
'20200517-world-active-data.json', 'E:/python_code/world_cov19')
data_csv = pd.DataFrame(json.loads(open(
'20200517-world-active-data.json', 'r+').read()))
print(end_1[36][0])
care = end_1[36][5].replace('[', '').replace(']', '').split(',')
try:
time = end_1[36][6].replace('/', ',').replace('/', ',').replace('"', ''
).split(',')
print(time)
time[2] = '2020'
date = []
in_date = time[2] + '-' + time[0] + '-' + time[1]
dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')
for k in range(len(end_1[36][5].replace('[', '').replace(']', '').
split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')
date.append(out_date)
print(date)
time_care = OrderedDict(zip(date, care))
print(time_care)
except:
pass
date.insert(0, 'Country')
cols = date
data_csv = data_csv.loc[:, cols]
    # (the transpose is applied via the CSV round-trip below)
data_csv.to_csv('20200517-world-active-data.json.csv')
df = pd.read_csv('20200517-world-active-data.json.csv')
new_csv = df.T
new_csv.to_csv('20200517-world-active-data.json.csv')
<|reserved_special_token_1|>
import re
import requests
import numpy as np
import json
import os
from collections import OrderedDict
import pandas as pd
import json
import datetime
import time
#write a list to a JSON file for easy reading with pandas
def write_list_to_json(list, json_file_name, json_file_save_path):
os.chdir(json_file_save_path)
with open(json_file_name, 'w') as f:
json.dump(list, f)
#data-fetching routine
def getworld_data(url,header):
headers = header
res = requests.get(url,headers = headers)
res.encoding = "UTF-8"
pattern = re.compile('(\'\{"(\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}\}\')',re.S)
end = re.findall(pattern,res.text)
a=str(end[0])
with open('test.txt','w') as f:
f.write(a)
data_relative_confirmed_json=[]
pattern_1 = re.compile('(\w+)":{"active":(.*?),"confirmed":(.*?),"deaths":(.*?),"recovered":(.*?),"relative_active":(.*?),"relative_active_start_date":(.*?),"relative_confirmed":(.*?),"relative_confirmed_start_date":(.*?),"relative_deaths":(.*?),"relative_deaths_start_date":(.*?),"relative_recovered":(.*?),"relative_recovered_start_date":(.*?)}',re.S)
end_1=re.findall(pattern_1,a)
return end_1
#date inference and data assembly
def count_time(end_1):
data_relative_confirmed_json=[]
country=[]
for i in range(len(end_1)):
data={
'Country':'',
}
data['Country']=end_1[i][0]
        #number of confirmed cases
country.append(end_1[i][0])
care=end_1[i][5].replace('[','').replace(']','').split(',')
try:
            time=end_1[i][6].replace('/',',').replace('/',',').replace('"','').split(',')  # note: this local name shadows the imported time module
print(time)
time[2]='2020'
date=[]
in_date = time[2]+'-'+time[0]+'-'+time[1]
dt = datetime.datetime.strptime(in_date, "%Y-%m-%d")
for k in range(len(end_1[i][5].replace('[','').replace(']','').split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
dt=datetime.datetime.strptime(out_date, "%Y-%m-%d")
date.append(out_date)
print(date)
time_care=OrderedDict(zip(date,care))
print(time_care)
date_json=OrderedDict(data,**time_care)
data_relative_confirmed_json.append(date_json)
        except:
            pass  # bare except: countries whose date field fails to parse are silently skipped
return data_relative_confirmed_json
def write_json_to_csv(data_relative_confirmed_json,end_1):
write_list_to_json(data_relative_confirmed_json,'20200517-world-active-data.json','E:/python_code/world_cov19')
data_csv=pd.DataFrame(json.loads(open('20200517-world-active-data.json','r+').read()))
print(end_1[36][0])
care=end_1[36][5].replace('[','').replace(']','').split(',')
try:
time=end_1[36][6].replace('/',',').replace('/',',').replace('"','').split(',')
print(time)
time[2]='2020'
date=[]
in_date = time[2]+'-'+time[0]+'-'+time[1]
dt = datetime.datetime.strptime(in_date, "%Y-%m-%d")
for k in range(len(end_1[36][5].replace('[','').replace(']','').split(','))):
out_date = (dt + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
dt=datetime.datetime.strptime(out_date, "%Y-%m-%d")
date.append(out_date)
print(date)
time_care=OrderedDict(zip(date,care))
print(time_care)
    except:
        pass  # bare except: a malformed date field is silently ignored
date.insert(0,'Country')
cols=date
data_csv=data_csv.loc[:,cols]
    # (the transpose is applied via the CSV round-trip below)
data_csv.to_csv('20200517-world-active-data.json.csv')
df=pd.read_csv('20200517-world-active-data.json.csv')
new_csv=df.T
new_csv.to_csv('20200517-world-active-data.json.csv')
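# ---------------------------------------------------------------------------
# No driver is included, so the intended call chain is left implicit. A
# minimal sketch -- the URL and header values below are placeholders, not
# values taken from the source:
if __name__ == '__main__':
    url = 'https://example.com/covid-world-data'   # hypothetical endpoint
    header = {'User-Agent': 'Mozilla/5.0'}         # hypothetical headers

    end_1 = getworld_data(url, header)    # scrape and regex-parse the per-country blob
    records = count_time(end_1)           # rebuild the date axis for each country
    write_json_to_csv(records, end_1)     # dump to JSON, reshape, and write the CSV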
|
flexible
|
{
"blob_id": "0677e12bc9733c76bff7ed3fe83e3800e64e9a10",
"index": 7633,
"step-1": "<mask token>\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\n<mask token>\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n",
"step-2": "<mask token>\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\ndef count_time(end_1):\n data_relative_confirmed_json = []\n country = []\n for i in range(len(end_1)):\n data = {'Country': ''}\n data['Country'] = end_1[i][0]\n country.append(end_1[i][0])\n care = end_1[i][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[i][6].replace('/', ',').replace('/', ',').replace('\"',\n '').split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[i][5].replace('[', '').replace(']', ''\n ).split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n date_json = OrderedDict(data, **time_care)\n data_relative_confirmed_json.append(date_json)\n except:\n pass\n return data_relative_confirmed_json\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n",
"step-3": "<mask token>\n\n\ndef write_list_to_json(list, json_file_name, json_file_save_path):\n os.chdir(json_file_save_path)\n with open(json_file_name, 'w') as f:\n json.dump(list, f)\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\ndef count_time(end_1):\n data_relative_confirmed_json = []\n country = []\n for i in range(len(end_1)):\n data = {'Country': ''}\n data['Country'] = end_1[i][0]\n country.append(end_1[i][0])\n care = end_1[i][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[i][6].replace('/', ',').replace('/', ',').replace('\"',\n '').split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[i][5].replace('[', '').replace(']', ''\n ).split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n date_json = OrderedDict(data, **time_care)\n data_relative_confirmed_json.append(date_json)\n except:\n pass\n return data_relative_confirmed_json\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n",
"step-4": "import re\nimport requests\nimport numpy as np\nimport json\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\nimport json\nimport datetime\nimport time\n\n\ndef write_list_to_json(list, json_file_name, json_file_save_path):\n os.chdir(json_file_save_path)\n with open(json_file_name, 'w') as f:\n json.dump(list, f)\n\n\ndef getworld_data(url, header):\n headers = header\n res = requests.get(url, headers=headers)\n res.encoding = 'UTF-8'\n pattern = re.compile(\n '(\\'\\\\{\"(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\\\}\\')'\n , re.S)\n end = re.findall(pattern, res.text)\n a = str(end[0])\n with open('test.txt', 'w') as f:\n f.write(a)\n data_relative_confirmed_json = []\n pattern_1 = re.compile(\n '(\\\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}'\n , re.S)\n end_1 = re.findall(pattern_1, a)\n return end_1\n\n\ndef count_time(end_1):\n data_relative_confirmed_json = []\n country = []\n for i in range(len(end_1)):\n data = {'Country': ''}\n data['Country'] = end_1[i][0]\n country.append(end_1[i][0])\n care = end_1[i][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[i][6].replace('/', ',').replace('/', ',').replace('\"',\n '').split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[i][5].replace('[', '').replace(']', ''\n ).split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\n '%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n date_json = OrderedDict(data, **time_care)\n data_relative_confirmed_json.append(date_json)\n except:\n pass\n return data_relative_confirmed_json\n\n\ndef write_json_to_csv(data_relative_confirmed_json, end_1):\n write_list_to_json(data_relative_confirmed_json,\n '20200517-world-active-data.json', 'E:/python_code/world_cov19')\n data_csv = pd.DataFrame(json.loads(open(\n '20200517-world-active-data.json', 'r+').read()))\n print(end_1[36][0])\n care = end_1[36][5].replace('[', '').replace(']', '').split(',')\n try:\n time = end_1[36][6].replace('/', ',').replace('/', ',').replace('\"', ''\n ).split(',')\n print(time)\n time[2] = '2020'\n date = []\n in_date = time[2] + '-' + time[0] + '-' + time[1]\n dt = datetime.datetime.strptime(in_date, '%Y-%m-%d')\n for k in range(len(end_1[36][5].replace('[', '').replace(']', '').\n split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n dt = datetime.datetime.strptime(out_date, '%Y-%m-%d')\n date.append(out_date)\n print(date)\n time_care = OrderedDict(zip(date, care))\n print(time_care)\n except:\n pass\n date.insert(0, 'Country')\n cols = date\n data_csv = data_csv.loc[:, cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df = 
pd.read_csv('20200517-world-active-data.json.csv')\n new_csv = df.T\n new_csv.to_csv('20200517-world-active-data.json.csv')\n",
"step-5": "import re\nimport requests\nimport numpy as np\nimport json\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\nimport json\nimport datetime\nimport time\n#将数组写入json文件方便pandas的读取\ndef write_list_to_json(list, json_file_name, json_file_save_path):\n os.chdir(json_file_save_path)\n with open(json_file_name, 'w') as f:\n json.dump(list, f)\n\n#获取数据算法\ndef getworld_data(url,header):\n headers = header\n res = requests.get(url,headers = headers)\n res.encoding = \"UTF-8\"\n pattern = re.compile('(\\'\\{\"(\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}\\}\\')',re.S)\n end = re.findall(pattern,res.text)\n a=str(end[0])\n with open('test.txt','w') as f:\n f.write(a)\n data_relative_confirmed_json=[]\n pattern_1 = re.compile('(\\w+)\":{\"active\":(.*?),\"confirmed\":(.*?),\"deaths\":(.*?),\"recovered\":(.*?),\"relative_active\":(.*?),\"relative_active_start_date\":(.*?),\"relative_confirmed\":(.*?),\"relative_confirmed_start_date\":(.*?),\"relative_deaths\":(.*?),\"relative_deaths_start_date\":(.*?),\"relative_recovered\":(.*?),\"relative_recovered_start_date\":(.*?)}',re.S)\n end_1=re.findall(pattern_1,a)\n return end_1\n\n#时间推算算法及数据写入\ndef count_time(end_1):\n data_relative_confirmed_json=[]\n country=[]\n for i in range(len(end_1)):\n data={\n 'Country':'',\n }\n data['Country']=end_1[i][0]\n #确诊人数\n country.append(end_1[i][0])\n care=end_1[i][5].replace('[','').replace(']','').split(',')\n try:\n time=end_1[i][6].replace('/',',').replace('/',',').replace('\"','').split(',')\n print(time)\n time[2]='2020'\n date=[]\n in_date = time[2]+'-'+time[0]+'-'+time[1]\n dt = datetime.datetime.strptime(in_date, \"%Y-%m-%d\")\n for k in range(len(end_1[i][5].replace('[','').replace(']','').split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n dt=datetime.datetime.strptime(out_date, \"%Y-%m-%d\")\n date.append(out_date)\n print(date)\n time_care=OrderedDict(zip(date,care))\n print(time_care)\n date_json=OrderedDict(data,**time_care)\n data_relative_confirmed_json.append(date_json)\n \n except:\n pass\n return data_relative_confirmed_json\n\ndef write_json_to_csv(data_relative_confirmed_json,end_1):\n write_list_to_json(data_relative_confirmed_json,'20200517-world-active-data.json','E:/python_code/world_cov19')\n data_csv=pd.DataFrame(json.loads(open('20200517-world-active-data.json','r+').read()))\n print(end_1[36][0])\n care=end_1[36][5].replace('[','').replace(']','').split(',')\n try:\n time=end_1[36][6].replace('/',',').replace('/',',').replace('\"','').split(',')\n print(time)\n time[2]='2020'\n date=[]\n in_date = time[2]+'-'+time[0]+'-'+time[1]\n dt = datetime.datetime.strptime(in_date, \"%Y-%m-%d\")\n for k in range(len(end_1[36][5].replace('[','').replace(']','').split(','))):\n out_date = (dt + datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n dt=datetime.datetime.strptime(out_date, \"%Y-%m-%d\")\n date.append(out_date)\n print(date)\n time_care=OrderedDict(zip(date,care))\n print(time_care)\n except:\n pass\n date.insert(0,'Country')\n cols=date\n data_csv=data_csv.loc[:,cols]\n data_csv.T\n data_csv.to_csv('20200517-world-active-data.json.csv')\n df=pd.read_csv('20200517-world-active-data.json.csv')\n new_csv=df.T\n 
new_csv.to_csv('20200517-world-active-data.json.csv')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# ---------------------MODULE 1 notes--------------------
# .
# .
# .
# .
# .
# .
# .
# .
# .
# .
# save the file with a .py extension first, otherwise it will not run
print("Hello")
# Ctrl+S to save
|
normal
|
{
"blob_id": "bb64da929ff2e1e04267518ec93a28bedb5a4de5",
"index": 7306,
"step-1": "<mask token>\n",
"step-2": "print('Hello')\n",
"step-3": "# ---------------------MODULE 1 notes--------------------\r\n# .\r\n# .\r\n# .\r\n# .\r\n# .\r\n# .\r\n# .\r\n# .\r\n# .\r\n# .\r\n\r\n# save as (file).py first if not it will not work\r\nprint(\"Hello\")\r\n\r\n# control s to save\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .data_processing import make_request_data, clear_request_data, get_token_from_text
from .review import Review
|
flexible
|
{
"blob_id": "5d654c056e6ef01e72821427c4f8dcb285755ee9",
"index": 2933,
"step-1": "<mask token>\n",
"step-2": "from .data_processing import make_request_data, clear_request_data, get_token_from_text\nfrom .review import Review\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import numpy as np
import scipy.sparse as sparse
from .world import World
from . import util
from . import fem
from . import linalg
def solveFine(world, aFine, MbFine, AbFine, boundaryConditions):
    """Assemble and solve the fine-scale FEM system; returns (uFineFull, AFine, MFine)."""
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)

    if MbFine is None:
        MbFine = np.zeros(NpFine)

    if AbFine is None:
        AbFine = np.zeros(NpFine)

    # Nodes with boundaryConditions == 0 are Dirichlet (fixed); the rest are free.
    boundaryMap = boundaryConditions == 0
    fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)
    freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)

    # Pick the scalar or matrix-valued local stiffness template.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine

    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)

    bFine = MFine * MbFine + AFine * AbFine

    # Restrict to the free DOFs, solve, and scatter back into the full vector.
    AFineFree = AFine[freeFine][:, freeFine]
    bFineFree = bFine[freeFine]

    uFineFree = linalg.linSolve(AFineFree, bFineFree)
    uFineFull = np.zeros(NpFine)
    uFineFull[freeFine] = uFineFree

    return uFineFull, AFine, MFine
def solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve on the coarse mesh via a Galerkin projection of the fine-scale
    system; returns (uCoarseFull, uFineFull)."""
    NWorldCoarse = world.NWorldCoarse
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NCoarseElement = world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)
    NpCoarse = np.prod(NWorldCoarse + 1)

    if MbFine is None:
        MbFine = np.zeros(NpFine)

    if AbFine is None:
        AbFine = np.zeros(NpFine)

    # Fixed/free splitting, this time on the coarse node set.
    boundaryMap = boundaryConditions == 0
    fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)
    freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)

    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine

    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)

    bFine = MFine * MbFine + AFine * AbFine

    # Project the fine-scale system onto the coarse FE space: basis maps coarse
    # nodal values to fine nodal values, so A_H = P^T A_h P and b_H = P^T b_h.
    basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
    bCoarse = basis.T * bFine
    ACoarse = basis.T * (AFine * basis)

    ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]
    bCoarseFree = bCoarse[freeCoarse]

    uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)
    uCoarseFull = np.zeros(NpCoarse)
    uCoarseFull[freeCoarse] = uCoarseFree

    # Interpolate the coarse solution back onto the fine mesh.
    uFineFull = basis * uCoarseFull
    return uCoarseFull, uFineFull
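# ---------------------------------------------------------------------------
# The restrict/solve/scatter pattern used in both solvers is the standard way
# to impose homogeneous Dirichlet conditions on an assembled sparse system.
# A minimal self-contained sketch of the same three steps on a 1D Poisson
# problem (plain numpy/scipy; the world/fem/util helpers above are not used):
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve

N = 10                                      # elements on [0, 1]
h = 1.0 / N
Np = N + 1                                  # nodes

# Tridiagonal P1 stiffness matrix and a lumped load vector for f = 1.
main = 2.0 / h * np.ones(Np)
off = -1.0 / h * np.ones(Np - 1)
A = sparse.diags([off, main, off], [-1, 0, 1], format='csr')
b = h * np.ones(Np)

fixed = np.array([0, Np - 1])               # Dirichlet nodes, u = 0 there
free = np.setdiff1d(np.arange(Np), fixed)

AFree = A[free][:, free]                    # restrict to the free DOFs
uFree = spsolve(AFree, b[free])             # solve the reduced system
u = np.zeros(Np)
u[free] = uFree                             # scatter back into the full vector

print(u)                                    # approximates u(x) = x(1 - x) / 2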
|
normal
|
{
"blob_id": "1b3493322fa85c2fe26a7f308466c4a1c72d5b35",
"index": 4637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NCoarseElement = world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n NpCoarse = np.prod(NWorldCoarse + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)\n freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)\n bCoarse = basis.T * bFine\n ACoarse = basis.T * (AFine * basis)\n ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]\n bCoarseFree = bCoarse[freeCoarse]\n uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)\n uCoarseFull = np.zeros(NpCoarse)\n uCoarseFull[freeCoarse] = uCoarseFree\n uCoarseFull = uCoarseFull\n uFineFull = basis * uCoarseFull\n return uCoarseFull, uFineFull\n",
"step-3": "<mask token>\n\n\ndef solveFine(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)\n freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n AFineFree = AFine[freeFine][:, freeFine]\n bFineFree = bFine[freeFine]\n uFineFree = linalg.linSolve(AFineFree, bFineFree)\n uFineFull = np.zeros(NpFine)\n uFineFull[freeFine] = uFineFree\n uFineFull = uFineFull\n return uFineFull, AFine, MFine\n\n\ndef solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NCoarseElement = world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n NpCoarse = np.prod(NWorldCoarse + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)\n freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)\n bCoarse = basis.T * bFine\n ACoarse = basis.T * (AFine * basis)\n ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]\n bCoarseFree = bCoarse[freeCoarse]\n uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)\n uCoarseFull = np.zeros(NpCoarse)\n uCoarseFull[freeCoarse] = uCoarseFree\n uCoarseFull = uCoarseFull\n uFineFull = basis * uCoarseFull\n return uCoarseFull, uFineFull\n",
"step-4": "import numpy as np\nimport scipy.sparse as sparse\nfrom .world import World\nfrom . import util\nfrom . import fem\nfrom . import linalg\n\n\ndef solveFine(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)\n freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n AFineFree = AFine[freeFine][:, freeFine]\n bFineFree = bFine[freeFine]\n uFineFree = linalg.linSolve(AFineFree, bFineFree)\n uFineFull = np.zeros(NpFine)\n uFineFull[freeFine] = uFineFree\n uFineFull = uFineFull\n return uFineFull, AFine, MFine\n\n\ndef solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NCoarseElement = world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n NpCoarse = np.prod(NWorldCoarse + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)\n freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)\n bCoarse = basis.T * bFine\n ACoarse = basis.T * (AFine * basis)\n ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]\n bCoarseFree = bCoarse[freeCoarse]\n uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)\n uCoarseFull = np.zeros(NpCoarse)\n uCoarseFull[freeCoarse] = uCoarseFree\n uCoarseFull = uCoarseFull\n uFineFull = basis * uCoarseFull\n return uCoarseFull, uFineFull\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
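The solveCoarse routine in the record above coarsens the assembled fine-scale system by a Galerkin projection through the prolongation matrix basis (ACoarse = basis.T * AFine * basis, bCoarse = basis.T * bFine). A minimal, self-contained sketch of that pattern on a toy sparse system — independent of the World/fem/util helpers used there, with all sizes and the aggregation-style prolongation purely illustrative:

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as spla

nF, nC = 9, 3                          # toy fine / coarse problem sizes
A = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(nF, nF)).tocsc()
b = np.ones(nF)

# P maps coarse dofs to the fine grid (piecewise-constant aggregation here)
rows = np.arange(nF)
P = sparse.csc_matrix((np.ones(nF), (rows, rows // (nF // nC))), shape=(nF, nC))

Ac = (P.T @ A @ P).tocsc()             # Galerkin coarse operator, A_c = P^T A P
uc = spla.spsolve(Ac, P.T @ b)         # coarse solve with b_c = P^T b
u_fine_approx = P @ uc                 # prolongate back, as solveCoarse does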
#!/usr/bin/env python
#coding=utf-8
#author:maohan
#date:20160706
#description: fetch data (hospitals, here) from the Baidu Maps Place API and save it as an .xls file
#ver:1.0
import urllib2
import json
import sys
from pyExcelerator import *
def bd_finder(qw,region,page_num):
page_size='20'
bd_ak='wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'
bd_url='http://api.map.baidu.com/place/v2/search?'
	furl=bd_url+'q='+qw+'&page_size='+page_size+'&page_num='+page_num+'&region='+region+'&output=json&ak='+bd_ak
page = urllib2.urlopen(furl)
html=page.read()
data=json.loads(html)
w=Workbook()
ws=w.add_sheet('test')
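	# Chinese header strings below: hospital name, address, phone number, latitude, longitude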
str1='医院名称'
str2='医院地址'
str3='电话号码'
str4='维度'
str5='经度'
ws.write(0,0,str1.decode('utf-8'))
ws.write(0,1,str2.decode('utf-8'))
ws.write(0,2,str3.decode('utf-8'))
ws.write(0,3,str4.decode('utf-8'))
ws.write(0,4,str5.decode('utf-8'))
# print type(data['results'])
# print len(data['results'])
count=0
for i in data['results']:
# print("名称:%-35s") %(i.get('name'))
# print("-------地址:%-35s") %(i.get('address'))
# print("-------电话:%-35s") %(i.get('telephone'))
# print("-------定位:(维度:%-10s)(经度:%-10s)") %(i.get('location')['lat'],i.get('location')['lng'])
# print (format("","100"))
count+=1
ws.write(count,0,'%s' %(i.get('name')))
ws.write(count,1,'%s' %(i.get('address')))
ws.write(count,2,'%s' %(i.get('telephone')))
ws.write(count,3,'%s' %(i.get('location')['lat']))
ws.write(count,4,'%s' %(i.get('location')['lng']))
	w.save('test_%s.xls' % page_num)  # one file per page; a single fixed name overwrote earlier pages
for k in xrange(0,10):
bd_finder('医院','武汉',str(k))
|
normal
|
{
"blob_id": "941a93c66a5131712f337ad055bbf2a93e6ec10d",
"index": 1634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef bd_finder(qw, region, page_num):\n page_size = '20'\n bd_ak = 'wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'\n bd_url = 'http://api.map.baidu.com/place/v2/search?'\n furl = (bd_url + 'q=' + qw + '&page_size=' + page_size + '&page_num=' +\n page_num + '®ion=' + region + '&output=json&ak=' + bd_ak)\n page = urllib2.urlopen(furl)\n html = page.read()\n data = json.loads(html)\n w = Workbook()\n ws = w.add_sheet('test')\n str1 = '医院名称'\n str2 = '医院地址'\n str3 = '电话号码'\n str4 = '维度'\n str5 = '经度'\n ws.write(0, 0, str1.decode('utf-8'))\n ws.write(0, 1, str2.decode('utf-8'))\n ws.write(0, 2, str3.decode('utf-8'))\n ws.write(0, 3, str4.decode('utf-8'))\n ws.write(0, 4, str5.decode('utf-8'))\n count = 0\n for i in data['results']:\n count += 1\n ws.write(count, 0, '%s' % i.get('name'))\n ws.write(count, 1, '%s' % i.get('address'))\n ws.write(count, 2, '%s' % i.get('telephone'))\n ws.write(count, 3, '%s' % i.get('location')['lat'])\n ws.write(count, 4, '%s' % i.get('location')['lng'])\n w.save('test.xls')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef bd_finder(qw, region, page_num):\n page_size = '20'\n bd_ak = 'wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'\n bd_url = 'http://api.map.baidu.com/place/v2/search?'\n furl = (bd_url + 'q=' + qw + '&page_size=' + page_size + '&page_num=' +\n page_num + '®ion=' + region + '&output=json&ak=' + bd_ak)\n page = urllib2.urlopen(furl)\n html = page.read()\n data = json.loads(html)\n w = Workbook()\n ws = w.add_sheet('test')\n str1 = '医院名称'\n str2 = '医院地址'\n str3 = '电话号码'\n str4 = '维度'\n str5 = '经度'\n ws.write(0, 0, str1.decode('utf-8'))\n ws.write(0, 1, str2.decode('utf-8'))\n ws.write(0, 2, str3.decode('utf-8'))\n ws.write(0, 3, str4.decode('utf-8'))\n ws.write(0, 4, str5.decode('utf-8'))\n count = 0\n for i in data['results']:\n count += 1\n ws.write(count, 0, '%s' % i.get('name'))\n ws.write(count, 1, '%s' % i.get('address'))\n ws.write(count, 2, '%s' % i.get('telephone'))\n ws.write(count, 3, '%s' % i.get('location')['lat'])\n ws.write(count, 4, '%s' % i.get('location')['lng'])\n w.save('test.xls')\n\n\nfor k in xrange(0, 10):\n bd_finder('医院', '武汉', str(k))\n",
"step-4": "import urllib2\nimport json\nimport sys\nfrom pyExcelerator import *\n\n\ndef bd_finder(qw, region, page_num):\n page_size = '20'\n bd_ak = 'wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'\n bd_url = 'http://api.map.baidu.com/place/v2/search?'\n furl = (bd_url + 'q=' + qw + '&page_size=' + page_size + '&page_num=' +\n page_num + '®ion=' + region + '&output=json&ak=' + bd_ak)\n page = urllib2.urlopen(furl)\n html = page.read()\n data = json.loads(html)\n w = Workbook()\n ws = w.add_sheet('test')\n str1 = '医院名称'\n str2 = '医院地址'\n str3 = '电话号码'\n str4 = '维度'\n str5 = '经度'\n ws.write(0, 0, str1.decode('utf-8'))\n ws.write(0, 1, str2.decode('utf-8'))\n ws.write(0, 2, str3.decode('utf-8'))\n ws.write(0, 3, str4.decode('utf-8'))\n ws.write(0, 4, str5.decode('utf-8'))\n count = 0\n for i in data['results']:\n count += 1\n ws.write(count, 0, '%s' % i.get('name'))\n ws.write(count, 1, '%s' % i.get('address'))\n ws.write(count, 2, '%s' % i.get('telephone'))\n ws.write(count, 3, '%s' % i.get('location')['lat'])\n ws.write(count, 4, '%s' % i.get('location')['lng'])\n w.save('test.xls')\n\n\nfor k in xrange(0, 10):\n bd_finder('医院', '武汉', str(k))\n",
"step-5": "#!/usr/bin/env python\n#coding=utf-8\n#author:maohan\n#date:20160706\n#decription:通过百度api获取相关信息,并保存为xls格式\n#ver:1.0\nimport urllib2\nimport json\nimport sys\nfrom pyExcelerator import *\ndef bd_finder(qw,region,page_num):\n\tpage_size='20'\n\tbd_ak='wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'\n\tbd_url='http://api.map.baidu.com/place/v2/search?'\n\tfurl=bd_url+'q='+qw+'&page_size='+page_size+'&page_num='+page_num+'®ion='+region+'&output=json&ak='+bd_ak\n\tpage = urllib2.urlopen(furl)\n\thtml=page.read()\n\tdata=json.loads(html)\n\tw=Workbook()\n\tws=w.add_sheet('test')\n\tstr1='医院名称'\n\tstr2='医院地址'\n\tstr3='电话号码'\n\tstr4='维度'\n\tstr5='经度'\n\tws.write(0,0,str1.decode('utf-8'))\n\tws.write(0,1,str2.decode('utf-8'))\n\tws.write(0,2,str3.decode('utf-8'))\n\tws.write(0,3,str4.decode('utf-8'))\n\tws.write(0,4,str5.decode('utf-8'))\n\t# print type(data['results'])\n\t# print len(data['results'])\n\tcount=0\n\tfor i in data['results']:\n\t\t# print(\"名称:%-35s\") %(i.get('name'))\n\t\t# print(\"-------地址:%-35s\") %(i.get('address'))\n\t\t# print(\"-------电话:%-35s\") %(i.get('telephone'))\n\t\t# print(\"-------定位:(维度:%-10s)(经度:%-10s)\") %(i.get('location')['lat'],i.get('location')['lng'])\n\t\t# print (format(\"\",\"100\"))\n\t\tcount+=1\n\t\tws.write(count,0,'%s' %(i.get('name')))\n\t\tws.write(count,1,'%s' %(i.get('address')))\n\t\tws.write(count,2,'%s' %(i.get('telephone')))\n\t\tws.write(count,3,'%s' %(i.get('location')['lat']))\n\t\tws.write(count,4,'%s' %(i.get('location')['lng']))\n\tw.save('test.xls')\nfor k in xrange(0,10):\n\tbd_finder('医院','武汉',str(k))\n\n\t",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
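For reference, a rough Python 3 counterpart of the fetcher in the record above, using requests in place of urllib2 (the endpoint and query parameters come from the original; the key handling and helper name are illustrative, and the xls writing via pyExcelerator is omitted):

import requests

def bd_finder_py3(qw, region, page_num, ak):
    # Same Baidu Place API endpoint as above; requests URL-encodes the params.
    params = {'q': qw, 'region': region, 'page_size': 20,
              'page_num': page_num, 'output': 'json', 'ak': ak}
    resp = requests.get('http://api.map.baidu.com/place/v2/search', params=params)
    resp.raise_for_status()
    rows = []
    for item in resp.json().get('results', []):
        loc = item.get('location', {})
        rows.append((item.get('name'), item.get('address'),
                     item.get('telephone'), loc.get('lat'), loc.get('lng')))
    return rows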
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrgStaff(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrgStaff(db.Model):
__tablename__ = 'org_staff'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=
'CASCADE'))
invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=
'CASCADE'))
org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',
ondelete='CASCADE'))
user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')
referer = db.relationship('User', primaryjoin=
'User.id==OrgStaff.invited_by')
org = db.relationship('Organisation', primaryjoin=
'Organisation.id==OrgStaff.org_id', backref='staff')
created_at = db.Column(db.DateTime, default=db.func.now())
updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.
func.now())
<|reserved_special_token_1|>
from app import db
class OrgStaff(db.Model):
__tablename__ = 'org_staff'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=
'CASCADE'))
invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=
'CASCADE'))
org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',
ondelete='CASCADE'))
user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')
referer = db.relationship('User', primaryjoin=
'User.id==OrgStaff.invited_by')
org = db.relationship('Organisation', primaryjoin=
'Organisation.id==OrgStaff.org_id', backref='staff')
created_at = db.Column(db.DateTime, default=db.func.now())
updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.
func.now())
<|reserved_special_token_1|>
from app import db
class OrgStaff(db.Model):
__tablename__ = 'org_staff'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
org_id = db.Column(db.Integer, db.ForeignKey('organisations.id', ondelete="CASCADE"))
user = db.relationship("User", primaryjoin="User.id==OrgStaff.user_id")
referer = db.relationship("User", primaryjoin="User.id==OrgStaff.invited_by")
org = db.relationship("Organisation", primaryjoin="Organisation.id==OrgStaff.org_id", backref='staff')
created_at = db.Column(db.DateTime, default=db.func.now())
updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
|
flexible
|
{
"blob_id": "b0f92b5e4cc972aca84a29b4568e85836f155273",
"index": 3774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass OrgStaff(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',\n ondelete='CASCADE'))\n user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')\n referer = db.relationship('User', primaryjoin=\n 'User.id==OrgStaff.invited_by')\n org = db.relationship('Organisation', primaryjoin=\n 'Organisation.id==OrgStaff.org_id', backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.\n func.now())\n",
"step-4": "from app import db\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',\n ondelete='CASCADE'))\n user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')\n referer = db.relationship('User', primaryjoin=\n 'User.id==OrgStaff.invited_by')\n org = db.relationship('Organisation', primaryjoin=\n 'Organisation.id==OrgStaff.org_id', backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.\n func.now())\n",
"step-5": "from app import db\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\"CASCADE\"))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\"CASCADE\"))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id', ondelete=\"CASCADE\"))\n user = db.relationship(\"User\", primaryjoin=\"User.id==OrgStaff.user_id\")\n referer = db.relationship(\"User\", primaryjoin=\"User.id==OrgStaff.invited_by\")\n org = db.relationship(\"Organisation\", primaryjoin=\"Organisation.id==OrgStaff.org_id\", backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
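A short usage sketch for the association model in the record above, assuming the User and Organisation models referenced by the foreign keys exist and that user, admin and org are already-persisted instances (names illustrative):

staff = OrgStaff(user_id=user.id, invited_by=admin.id, org_id=org.id)
db.session.add(staff)
db.session.commit()

for member in org.staff:              # backref declared on the org relationship
    print(member.user.id, member.referer.id)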
#!/usr/bin/python
import wx
class test(wx.Frame):
def __init__(self,parent,id):
wx.Frame.__init__(self,parent,id,"TestFrame",size=(500,500))
if __name__ == '__main__':
app = wx.PySimpleApp()
    frame = test(parent=None,id=-1)
    frame.Show()      # wx.Frame method is Show(), not show()
    app.MainLoop()    # wx.App method is MainLoop(), not mainloop()
|
normal
|
{
"blob_id": "e204cbbf36ac180eba0e95916345088c77bca7c0",
"index": 5001,
"step-1": "<mask token>\n\n\nclass test(wx.Frame):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None, id=-1)\n frame.show()\n app.mainloop()\n",
"step-4": "import wx\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None, id=-1)\n frame.show()\n app.mainloop()\n",
"step-5": "#!/usr/bin/python\nimport wx\n\nclass test(wx.Frame):\n def __init__(self,parent,id):\n wx.Frame.__init__(self,parent,id,\"TestFrame\",size=(500,500))\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None,id=-1,)\n frame.show()\n app.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
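The script in the record above targets classic wxPython; wx.PySimpleApp has since been removed from the Phoenix releases. A minimal modern equivalent, using only documented wx calls:

import wx

class TestFrame(wx.Frame):
    def __init__(self, parent):
        super().__init__(parent, title='TestFrame', size=(500, 500))

if __name__ == '__main__':
    app = wx.App(False)               # replaces the deprecated wx.PySimpleApp
    frame = TestFrame(None)
    frame.Show()                      # capitalised Show()/MainLoop() in wx
    app.MainLoop()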
for i in range(0, 20):
if i % 20 == 0:
print('Stop It')
else:
print('The For Loop Failed')
|
normal
|
{
"blob_id": "bfb2d7b811fd450b53493375fa130649349d308f",
"index": 174,
"step-1": "<mask token>\n",
"step-2": "for i in range(0, 20):\n if i % 20 == 0:\n print('Stop It')\nelse:\n print('The For Loop Failed')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
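Note that step-2 in the record above re-indents the else to the for loop, which silently turns the if/else into Python's for-else construct: a for-else body runs once, and only when the loop finishes without hitting break. A quick contrast:

for i in range(3):
    if i == 1:
        print('found 1')              # if/else: chosen on every iteration
    else:
        print('not 1')

for i in range(3):
    if i == 10:
        break
else:
    print('no break occurred')        # for-else: runs once, after the loop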
<|reserved_special_token_0|>
class Net(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _initialize_weights(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,
1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,
1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())
<|reserved_special_token_0|>
def _initialize_weights(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,
1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,
1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())
def forward(self, x):
x = x.float()
out = self.block0(x)
res0_1 = out
res0_2 = out
res0_3 = self.side0_3(out)
out = self.block1(out)
res1_2 = out
res1_3 = self.side1_3(out)
out = out + res0_1
out = self.block2(out)
res2_3 = self.side2_3(out)
out = out + res0_2 + res1_2
out = self.block3(out)
out = out + res0_3 + res1_3 + res2_3
out = self.fc(out)
return out
def _initialize_weights(self):
pass
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,
1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))
self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1
)), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,
1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(4))
self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.
LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())
def forward(self, x):
x = x.float()
out = self.block0(x)
res0_1 = out
res0_2 = out
res0_3 = self.side0_3(out)
out = self.block1(out)
res1_2 = out
res1_3 = self.side1_3(out)
out = out + res0_1
out = self.block2(out)
res2_3 = self.side2_3(out)
out = out + res0_2 + res1_2
out = self.block3(out)
out = out + res0_3 + res1_3 + res2_3
out = self.fc(out)
return out
def _initialize_weights(self):
pass
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
# add DenseNet structure
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# self.x = x
self.block0 = nn.Sequential(
# input image 96x96
nn.ReLU(),
nn.Conv2d(3, 64, (5, 5), (1, 1), (2, 2)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(64),
)
self.block1 = nn.Sequential(
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(64),
)
self.block2 = nn.Sequential(
nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(64),
)
self.block3 = nn.Sequential(
nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(32),
nn.Conv2d(32, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.side0_3 = nn.Sequential(
nn.Conv2d(64, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.side1_3 = nn.Sequential(
nn.Conv2d(64, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.side2_3 = nn.Sequential(
nn.Conv2d(64, 4, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(4),
)
self.fc = nn.Sequential(
nn.Conv2d(4, 1, (1, 1), (1, 1)),
nn.LeakyReLU(0.1),
nn.BatchNorm2d(1),
nn.Sigmoid()
)
def forward(self, x):
x=x.float()
out = self.block0(x) # 64x96x96
res0_1 = out
res0_2 = out
res0_3 = self.side0_3(out)
out = self.block1(out) # 64x96x96
res1_2 = out
res1_3 = self.side1_3(out)
out = out + res0_1
out = self.block2(out) # 64x96x96
res2_3 = self.side2_3(out)
out = out + res0_2 + res1_2
out = self.block3(out) # 4x96x96
out = out + res0_3 + res1_3 + res2_3
out = self.fc(out)
return out
def _initialize_weights(self):
pass
|
flexible
|
{
"blob_id": "49cdeb59e75ed93122b3a62fbdc508b7d66166d6",
"index": 2337,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n <mask token>\n <mask token>\n\n def _initialize_weights(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,\n 1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,\n 1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())\n <mask token>\n\n def _initialize_weights(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,\n 1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,\n 1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())\n\n def forward(self, x):\n x = x.float()\n out = self.block0(x)\n res0_1 = out\n res0_2 = out\n res0_3 = self.side0_3(out)\n out = self.block1(out)\n res1_2 = out\n res1_3 = self.side1_3(out)\n out = out + res0_1\n out = self.block2(out)\n res2_3 = self.side2_3(out)\n out = out + res0_2 + res1_2\n out = self.block3(out)\n out = out + res0_3 + res1_3 + res2_3\n out = self.fc(out)\n return out\n\n def _initialize_weights(self):\n pass\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.block0 = nn.Sequential(nn.ReLU(), nn.Conv2d(3, 64, (5, 5), (1,\n 1), (2, 2)), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block1 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block2 = nn.Sequential(nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(64))\n self.block3 = nn.Sequential(nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1\n )), nn.LeakyReLU(0.1), nn.BatchNorm2d(32), nn.Conv2d(32, 4, (1,\n 1), (1, 1)), nn.LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side0_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side1_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.side2_3 = nn.Sequential(nn.Conv2d(64, 4, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(4))\n self.fc = nn.Sequential(nn.Conv2d(4, 1, (1, 1), (1, 1)), nn.\n LeakyReLU(0.1), nn.BatchNorm2d(1), nn.Sigmoid())\n\n def forward(self, x):\n x = x.float()\n out = self.block0(x)\n res0_1 = out\n res0_2 = out\n res0_3 = self.side0_3(out)\n out = self.block1(out)\n res1_2 = out\n res1_3 = self.side1_3(out)\n out = out + res0_1\n out = self.block2(out)\n res2_3 = self.side2_3(out)\n out = out + res0_2 + res1_2\n out = self.block3(out)\n out = out + res0_3 + res1_3 + res2_3\n out = self.fc(out)\n return out\n\n def _initialize_weights(self):\n pass\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\n\n# add DenseNet structure\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n# self.x = x\n self.block0 = nn.Sequential(\n # input image 96x96\n nn.ReLU(),\n nn.Conv2d(3, 64, (5, 5), (1, 1), (2, 2)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(64),\n\n )\n \n self.block1 = nn.Sequential(\n nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(64),\n )\n \n self.block2 = nn.Sequential(\n nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(64), \n )\n \n self.block3 = nn.Sequential(\n nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.side0_3 = nn.Sequential(\n nn.Conv2d(64, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.side1_3 = nn.Sequential(\n nn.Conv2d(64, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.side2_3 = nn.Sequential(\n nn.Conv2d(64, 4, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(4),\n )\n \n self.fc = nn.Sequential(\n nn.Conv2d(4, 1, (1, 1), (1, 1)),\n nn.LeakyReLU(0.1),\n nn.BatchNorm2d(1),\n nn.Sigmoid()\n )\n \n \n def forward(self, x):\n x=x.float()\n out = self.block0(x) # 64x96x96\n res0_1 = out\n res0_2 = out\n res0_3 = self.side0_3(out)\n \n out = self.block1(out) # 64x96x96\n res1_2 = out\n res1_3 = self.side1_3(out)\n \n out = out + res0_1\n out = self.block2(out) # 64x96x96\n res2_3 = self.side2_3(out)\n \n out = out + res0_2 + res1_2\n out = self.block3(out) # 4x96x96\n \n out = out + res0_3 + res1_3 + res2_3\n out = self.fc(out)\n\n return out\n \n \n def _initialize_weights(self):\n pass",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
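A quick smoke test for the residual/DenseNet-style Net in the record above; the 3x96x96 input size comes from the comments in the original, and the batch size of 2 is arbitrary:

import torch

net = Net()
x = torch.randn(2, 3, 96, 96)         # (batch, channels, height, width)
with torch.no_grad():
    y = net(x)
print(y.shape)                        # all convs preserve 96x96: [2, 1, 96, 96]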
<|reserved_special_token_0|>
def get_U_S_T_from_WM(WM):
U = {}
S = {}
T = {}
for wm in WM.WM:
U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
U['SI'] = 0.073
U['FW'] = 0.028
U['Q'] = Q
S['SI'] = 0
S['FW'] = 0
T['SI'] = 0
T['FW'] = 0
T['Q'] = 1
return U, S, T
<|reserved_special_token_0|>
def get_U_from_x(x):
U = {}
U['PWS'] = x[0]
U['AWS'] = x[1]
U['DWS'] = x[2]
U['PWN'] = x[3]
U['AWN'] = x[4]
U['FW'] = x[5]
U['SI'] = x[6]
U['Q'] = x[7]
return U
<|reserved_special_token_0|>
def run_inverse_model(zz, U, S, T):
dv = -AM[zz].dot(x0[zz])
if zz == 'base':
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
elif zz == 'massbal':
Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])
Evec = array([(xx / 5) for xx in x0[zz]])
E = diag(Evec)
Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))
Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
xbase = x0[zz] + xsol_Ad
P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +
linalg.inv(Winv)).dot(AM[zz].dot(E)))))
Ubase = get_U_from_x(xbase)
Ue = get_U_from_x(P)
return Ubase, Ue, xbase
<|reserved_special_token_0|>
def plot_base_case_simple(Ubase, Ue, plt):
f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,
gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))
alf = 0.75
capi = 7
axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[
coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',
'DWS']], capsize=capi, alpha=alf)
axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')
ylimi = 20
axx[0].set_ylim(-ylimi, ylimi)
ylimi = 4
axx[1].set_ylim(-ylimi, ylimi)
axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color
=[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in
['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)
axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',
color='k')
axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue
['SI'] + Ue['FW'], capsize=capi, alpha=alf)
axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')
fwlim = 0.2
axx[2].set_ylim(-fwlim, fwlim)
fsz = 14
axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)
axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)
axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],
yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)
axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')
for ii in range(3):
axx[ii].axhline(0, color='k')
axx[0].set_xticks(range(2))
axx[0].set_xticklabels(['AWS', 'DWS'])
axx[1].set_xticks(range(3))
axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])
axx[2].set_xticks(range(1))
axx[2].set_xticklabels(['FW'])
axx[3].set_xticks([0])
axx[3].set_xticklabels('Q')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',
bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',
bbox_inches='tight')
<|reserved_special_token_0|>
def get_a_b_fracs(Ubase, S):
a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[
'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *
(S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
return a, b
<|reserved_special_token_0|>
def plot_adep():
for ii, kk in enumerate(a):
plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))
xlabel('$\\mathbf{1-\\epsilon}$\nfraction of PWN in PWS')
ylabel('$\\mathbf{a}$\n fraction of (FW + SI) in PWS')
xlim(0, 1)
axhline(0, color='k')
legend()
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')
<|reserved_special_token_0|>
def get_mats_from_dic(sivar):
Svec = array([float(ff) for ff in sivar])
Tvec = array([float(ff) for ff in sivar[Svec[0]]])
simats = {}
for QQ, kk in enumerate(Ubase):
simats[kk] = zeros((len(Svec), len(Tvec)))
for ii, ss in enumerate(Svec):
for jj, tt in enumerate(Tvec):
simats[kk][ii, jj] = sivar[ss][tt][QQ]
return Svec, Tvec, simats
<|reserved_special_token_0|>
def plot_SIresponse():
f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)
axivec = array([])
for axirow in axx:
for axi in axirow:
axivec = hstack((axivec, axi))
for axi, kk in zip(axivec, simats):
if (kk == 'FW') | (kk == 'SI'):
climi = 10
contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) *
1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)
axi.set_title(kk + ' [mSv]')
cbar = colorbar(contit, ax=axi, format='%1.0f')
elif kk == 'Q':
climi = 30
contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -
Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.
PiYG_r)
axi.set_title(kk + ' [TW]')
cbar = colorbar(contit, ax=axi, format='%2.0f')
else:
climi = 0.3
contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],
vmin=-climi, vmax=climi, cmap=cm.PuOr_r)
axi.set_title(kk + ' [Sv]')
cbar = colorbar(contit, ax=axi, format='%0.2f')
for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
label.set_visible(False)
f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)
f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va=
'center', rotation='vertical', fontsize=14)
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=
'tight')
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=
'tight')
<|reserved_special_token_0|>
def plot_adep_pw():
f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)
f.subplots_adjust(wspace=0.3)
for ii, var in enumerate([a_pwmat, b_pwmat]):
if ii == 0:
xvar = 1 - epsilon
xvar2 = 1
xvar3 = 0
else:
xvar = epsilon
xvar2 = 0
xvar3 = 1
axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, linewidth=3, color='k', label=
'Base case', zorder=5)
axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[
'SI']) * 1000.0, color='purple', zorder=4, label=
'Polar Waters fresher by 0.5', linewidth=3)
axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)
axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw
['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')
axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)
axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +
U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')
axx[ii].set_ylim(-30, 140)
axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +
U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=
'Add 20 mSv of Fresh Water')
axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[
'SI']) * 1000.0, linewidth=3, color=fwcol)
axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, 'o', color=fwcol, label='')
axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',
color=fwcol, label='')
axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,
color=fwcol, label='')
axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, ash, color=fwcol, label='')
axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)
axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)
axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)
axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)
axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)
axx[0].set_title('a) Estuarine limb', fontsize=14)
axx[1].set_title('b) Overturning limb', fontsize=14)
axx[0].set_ylabel(
'$\\mathbf{\\delta}\\ U_{FW}$\nFW transport in $\\mathbf{PWS}$ [mSv]')
axx[1].set_ylabel(
'$\\mathbf{\\gamma}\\ U_{FW}$\nFW transport in $\\mathbf{DWS}$ [mSv]')
axx[0].set_xlabel(
'$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\nPWN transport in $\\mathbf{PWS}$ [Sv]'
)
axx[1].set_xlabel(
'$\\mathbf{\\epsilon} \\ U_{PWN}$\nPWN transport in $\\mathbf{DWS}$ [Sv]'
)
for axi in (axx[0], axx[1]):
axi.axhline(0, color='k')
axi.set_xlim(-0.05, 2.2)
axx[0].axhline(56, color='k', linestyle='--')
axx[1].axhline(37, color='k', linestyle='--')
savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')
savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')
<|reserved_special_token_0|>
def get_PWN_from_FW(x2, y1, y2, y3):
x3 = (y3 - y1) * x2 / (y2 - y1)
return x3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_U_S_T_from_WM(WM):
U = {}
S = {}
T = {}
for wm in WM.WM:
U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
U['SI'] = 0.073
U['FW'] = 0.028
U['Q'] = Q
S['SI'] = 0
S['FW'] = 0
T['SI'] = 0
T['FW'] = 0
T['Q'] = 1
return U, S, T
<|reserved_special_token_0|>
def get_U_from_x(x):
U = {}
U['PWS'] = x[0]
U['AWS'] = x[1]
U['DWS'] = x[2]
U['PWN'] = x[3]
U['AWN'] = x[4]
U['FW'] = x[5]
U['SI'] = x[6]
U['Q'] = x[7]
return U
<|reserved_special_token_0|>
def run_inverse_model(zz, U, S, T):
dv = -AM[zz].dot(x0[zz])
if zz == 'base':
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
elif zz == 'massbal':
Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])
Evec = array([(xx / 5) for xx in x0[zz]])
E = diag(Evec)
Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))
Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
xbase = x0[zz] + xsol_Ad
P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +
linalg.inv(Winv)).dot(AM[zz].dot(E)))))
Ubase = get_U_from_x(xbase)
Ue = get_U_from_x(P)
return Ubase, Ue, xbase
<|reserved_special_token_0|>
def plot_base_case_simple(Ubase, Ue, plt):
f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,
gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))
alf = 0.75
capi = 7
axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[
coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',
'DWS']], capsize=capi, alpha=alf)
axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')
ylimi = 20
axx[0].set_ylim(-ylimi, ylimi)
ylimi = 4
axx[1].set_ylim(-ylimi, ylimi)
axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color
=[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in
['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)
axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',
color='k')
axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue
['SI'] + Ue['FW'], capsize=capi, alpha=alf)
axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')
fwlim = 0.2
axx[2].set_ylim(-fwlim, fwlim)
fsz = 14
axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)
axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)
axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],
yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)
axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')
for ii in range(3):
axx[ii].axhline(0, color='k')
axx[0].set_xticks(range(2))
axx[0].set_xticklabels(['AWS', 'DWS'])
axx[1].set_xticks(range(3))
axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])
axx[2].set_xticks(range(1))
axx[2].set_xticklabels(['FW'])
axx[3].set_xticks([0])
axx[3].set_xticklabels('Q')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',
bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',
bbox_inches='tight')
<|reserved_special_token_0|>
def get_a_b_fracs(Ubase, S):
a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[
'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *
(S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
return a, b
<|reserved_special_token_0|>
def plot_adep():
for ii, kk in enumerate(a):
plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))
xlabel('$\\mathbf{1-\\epsilon}$\nfraction of PWN in PWS')
ylabel('$\\mathbf{a}$\n fraction of (FW + SI) in PWS')
xlim(0, 1)
axhline(0, color='k')
legend()
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')
<|reserved_special_token_0|>
def get_mats_from_dic(sivar):
Svec = array([float(ff) for ff in sivar])
Tvec = array([float(ff) for ff in sivar[Svec[0]]])
simats = {}
for QQ, kk in enumerate(Ubase):
simats[kk] = zeros((len(Svec), len(Tvec)))
for ii, ss in enumerate(Svec):
for jj, tt in enumerate(Tvec):
simats[kk][ii, jj] = sivar[ss][tt][QQ]
return Svec, Tvec, simats
<|reserved_special_token_0|>
def plot_SIresponse():
f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)
axivec = array([])
for axirow in axx:
for axi in axirow:
axivec = hstack((axivec, axi))
for axi, kk in zip(axivec, simats):
if (kk == 'FW') | (kk == 'SI'):
climi = 10
contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) *
1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)
axi.set_title(kk + ' [mSv]')
cbar = colorbar(contit, ax=axi, format='%1.0f')
elif kk == 'Q':
climi = 30
contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -
Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.
PiYG_r)
axi.set_title(kk + ' [TW]')
cbar = colorbar(contit, ax=axi, format='%2.0f')
else:
climi = 0.3
contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],
vmin=-climi, vmax=climi, cmap=cm.PuOr_r)
axi.set_title(kk + ' [Sv]')
cbar = colorbar(contit, ax=axi, format='%0.2f')
for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
label.set_visible(False)
f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)
f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va=
'center', rotation='vertical', fontsize=14)
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=
'tight')
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=
'tight')
<|reserved_special_token_0|>
def lineplot_PW_salinity():
f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)
xind = -1
yind = -1
svr = len(PWS_Svec)
xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],
[(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]
ufw_tot = -Ubase['SI'] - Ubase['FW']
yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + ufw_tot,
pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(
[(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in
range(svr)])]
yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -
Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in
range(svr)])]
xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']
titvec = ['a) Vary PWN salinity\n\nPWS = 34.4',
'b) Vary PWS salinity\n\nPWN = 33.7', 'c) Vary both PW salinities']
lw = 2
for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:
axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=
coldic[kk], label=kk, linewidth=lw)
axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=
coldic[kk], label=kk, linewidth=lw)
axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for
ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)
for ii in range(3):
ax1 = axx[ii].twinx()
for ll in ['']:
ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)
ax2 = axx[ii].twinx()
ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=
'limegreen', linewidth=lw)
axx[ii].set_xlabel(xlab[ii])
ax1.set_ylim(-10, 10)
ax2.set_ylim(-40, 40)
axx[ii].set_title(titvec[ii], fontweight='bold')
if ii != 2:
ax1.set_yticklabels('')
ax2.set_yticklabels('')
axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])
axx[0].set_ylim(-1.5, 1.5)
axx[0].set_yticks(arange(-1, 1.1, 0.5))
ax2.spines['right'].set_position(('axes', 1.3))
axx[0].set_ylabel('Transport anomaly [Sv]')
ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')
ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')
ax1.tick_params(axis='y', colors='c')
ax2.tick_params(axis='y', colors='limegreen')
leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)
for line in leg.get_lines():
line.set_linewidth(4.0)
axi2 = axx[2].twiny()
axi2.set_xticks(arange(32.8, 33.8, 0.2))
axi2.set_xlim(xvar[0][0], xvar[0][-1])
axi2.set_xlabel('PWN salinity')
axx[2].axvline(34.4 - 0.5, color='k', zorder=0)
savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')
savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')
<|reserved_special_token_0|>
def plot_adep_pw():
f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)
f.subplots_adjust(wspace=0.3)
for ii, var in enumerate([a_pwmat, b_pwmat]):
if ii == 0:
xvar = 1 - epsilon
xvar2 = 1
xvar3 = 0
else:
xvar = epsilon
xvar2 = 0
xvar3 = 1
axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, linewidth=3, color='k', label=
'Base case', zorder=5)
axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[
'SI']) * 1000.0, color='purple', zorder=4, label=
'Polar Waters fresher by 0.5', linewidth=3)
axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)
axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw
['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')
axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)
axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +
U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')
axx[ii].set_ylim(-30, 140)
axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +
U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=
'Add 20 mSv of Fresh Water')
axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[
'SI']) * 1000.0, linewidth=3, color=fwcol)
axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, 'o', color=fwcol, label='')
axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',
color=fwcol, label='')
axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,
color=fwcol, label='')
axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, ash, color=fwcol, label='')
axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)
axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)
axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)
axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)
axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)
axx[0].set_title('a) Estuarine limb', fontsize=14)
axx[1].set_title('b) Overturning limb', fontsize=14)
axx[0].set_ylabel(
'$\\mathbf{\\delta}\\ U_{FW}$\nFW transport in $\\mathbf{PWS}$ [mSv]')
axx[1].set_ylabel(
'$\\mathbf{\\gamma}\\ U_{FW}$\nFW transport in $\\mathbf{DWS}$ [mSv]')
axx[0].set_xlabel(
'$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\nPWN transport in $\\mathbf{PWS}$ [Sv]'
)
axx[1].set_xlabel(
'$\\mathbf{\\epsilon} \\ U_{PWN}$\nPWN transport in $\\mathbf{DWS}$ [Sv]'
)
for axi in (axx[0], axx[1]):
axi.axhline(0, color='k')
axi.set_xlim(-0.05, 2.2)
axx[0].axhline(56, color='k', linestyle='--')
axx[1].axhline(37, color='k', linestyle='--')
savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')
savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')
<|reserved_special_token_0|>
def get_PWN_from_FW(x2, y1, y2, y3):
x3 = (y3 - y1) * x2 / (y2 - y1)
return x3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_U_S_T_from_WM(WM):
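    # Build the budget inputs from the xarray dataset WM: climatological-mean
    # volume transport U, salinity S and potential temperature T per water
    # mass, plus fixed sea-ice (SI), freshwater (FW) and heat (Q) terms.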
U = {}
S = {}
T = {}
for wm in WM.WM:
U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
U['SI'] = 0.073
U['FW'] = 0.028
U['Q'] = Q
S['SI'] = 0
S['FW'] = 0
T['SI'] = 0
T['FW'] = 0
T['Q'] = 1
return U, S, T
<|reserved_special_token_0|>
def get_U_from_x(x):
U = {}
U['PWS'] = x[0]
U['AWS'] = x[1]
U['DWS'] = x[2]
U['PWN'] = x[3]
U['AWN'] = x[4]
U['FW'] = x[5]
U['SI'] = x[6]
U['Q'] = x[7]
return U
<|reserved_special_token_0|>
AM[zz].dot(x0[zz])
16 / 35
1.5 / 10
<|reserved_special_token_0|>
def run_inverse_model(zz, U, S, T):
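    # Tapered, weighted least squares solved via SVD: Winv weights the volume,
    # salt and heat constraints, E tapers each unknown with a prior error of
    # x0/5, and P returns the posterior error estimates for the solution.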
dv = -AM[zz].dot(x0[zz])
if zz == 'base':
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
elif zz == 'massbal':
Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])
Evec = array([(xx / 5) for xx in x0[zz]])
E = diag(Evec)
Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))
Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
xbase = x0[zz] + xsol_Ad
P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +
linalg.inv(Winv)).dot(AM[zz].dot(E)))))
Ubase = get_U_from_x(xbase)
Ue = get_U_from_x(P)
return Ubase, Ue, xbase
<|reserved_special_token_0|>
def plot_base_case_simple(Ubase, Ue, plt):
f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,
gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))
alf = 0.75
capi = 7
axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[
coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',
'DWS']], capsize=capi, alpha=alf)
axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')
ylimi = 20
axx[0].set_ylim(-ylimi, ylimi)
ylimi = 4
axx[1].set_ylim(-ylimi, ylimi)
axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color
=[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in
['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)
axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',
color='k')
axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue
['SI'] + Ue['FW'], capsize=capi, alpha=alf)
axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')
fwlim = 0.2
axx[2].set_ylim(-fwlim, fwlim)
fsz = 14
axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)
axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)
axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],
yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)
axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')
for ii in range(3):
axx[ii].axhline(0, color='k')
axx[0].set_xticks(range(2))
axx[0].set_xticklabels(['AWS', 'DWS'])
axx[1].set_xticks(range(3))
axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])
axx[2].set_xticks(range(1))
axx[2].set_xticklabels(['FW'])
axx[3].set_xticks([0])
axx[3].set_xticklabels('Q')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',
bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',
bbox_inches='tight')
plot_base_case_simple(Ubase, Ue, 'base')
U
Ubase['SI'] + Ubase['FW']
Ubase['Q'] * cp * rhow / 1000000.0
<|reserved_special_token_0|>
basediff
plot_base_case_simple(Umb_sol, Umb_err, 'mb')
[(kk, Umb_sol[kk] - U_mb[kk]) for kk in Ubase]
<|reserved_special_token_0|>
def get_a_b_fracs(Ubase, S):
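    # a and b partition the surface freshwater input (FW + SI) between the
    # estuarine limb (exported with PWS) and the overturning limb (exported
    # with DWS); epsilon is the fraction of PWN routed into DWS.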
a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[
'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *
(S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
return a, b
S['PWN'] / S['AWS']
S['PWS'] / S['AWS']
S['DWS'] / S['AWS']
Ubase['PWS']
Ubase['DWS']
Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1)
Ubase['PWS'] * (S['PWS'] / S['AWS'] - 1)
Ubase['DWS'] * (S['DWS'] / S['AWS'] - 1)
Ubase['FW'] + Ubase['SI']
<|reserved_special_token_0|>
[(kk, S[kk] - S_mb[kk]) for kk in S]
def plot_adep():
for ii, kk in enumerate(a):
plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))
xlabel('$\\mathbf{1-\\epsilon}$\nfraction of PWN in PWS')
ylabel('$\\mathbf{a}$\n fraction of (FW + SI) in PWS')
xlim(0, 1)
axhline(0, color='k')
legend()
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')
plot_adep()
<|reserved_special_token_0|>
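# Sensitivity experiment: re-run the inversion over a grid of assumed sea-ice
# salinities (S_SI) and effective sea-ice temperatures (T_SI).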
for S_SI in range(0, 10, 2):
sivar[S_SI] = {}
for T_SI in range(-90, 5, 10):
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],
S['PWN'], S['AWN'], S['FW'], S_SI, 0], [T['PWS'], T['AWS'], T[
'DWS'], T['PWN'], T['AWN'], T['FW'], T_SI, 1]])
dv = -AM.dot(xbase)
Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
sivar[S_SI][T_SI] = xbase + xsol_Ad
def get_mats_from_dic(sivar):
Svec = array([float(ff) for ff in sivar])
Tvec = array([float(ff) for ff in sivar[Svec[0]]])
simats = {}
for QQ, kk in enumerate(Ubase):
simats[kk] = zeros((len(Svec), len(Tvec)))
for ii, ss in enumerate(Svec):
for jj, tt in enumerate(Tvec):
simats[kk][ii, jj] = sivar[ss][tt][QQ]
return Svec, Tvec, simats
<|reserved_special_token_0|>
def plot_SIresponse():
f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)
axivec = array([])
for axirow in axx:
for axi in axirow:
axivec = hstack((axivec, axi))
for axi, kk in zip(axivec, simats):
if (kk == 'FW') | (kk == 'SI'):
climi = 10
contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) *
1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)
axi.set_title(kk + ' [mSv]')
cbar = colorbar(contit, ax=axi, format='%1.0f')
elif kk == 'Q':
climi = 30
contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -
Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.
PiYG_r)
axi.set_title(kk + ' [TW]')
cbar = colorbar(contit, ax=axi, format='%2.0f')
else:
climi = 0.3
contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],
vmin=-climi, vmax=climi, cmap=cm.PuOr_r)
axi.set_title(kk + ' [Sv]')
cbar = colorbar(contit, ax=axi, format='%0.2f')
for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
label.set_visible(False)
f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)
f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va=
'center', rotation='vertical', fontsize=14)
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=
'tight')
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=
'tight')
plot_SIresponse()
contourf(simats['AWN'].T - Ubase['AWN'] + simats['PWN'].T - Ubase['PWN'])
colorbar()
<|reserved_special_token_0|>
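# Sensitivity experiment: perturb the PWN and PWS salinities by up to -1 and
# re-run the inversion for each combination.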
for S_PWNa in arange(-1, 0.05, 0.1):
pwsvar[S_PWNa] = {}
for S_PWSa in arange(-1.0, 0.05, 0.1):
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] + S_PWSa, S['AWS'],
S['DWS'], S['PWN'] + S_PWNa, S['AWN'], S['FW'], S['SI'], 0], [T
['PWS'], T['AWS'], T['DWS'], T['PWN'], T['AWN'], T['FW'], T[
'SI'], 1]])
dv = -AM.dot(xbase)
Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
pwsvar[S_PWNa][S_PWSa] = xbase + xsol_Ad
<|reserved_special_token_0|>
(U_pw['FW'] + U_pw['SI'] - (Ubase['FW'] + Ubase['SI'])) * 1000.0
U_pw['FW'] + U_pw['SI']
Ubase['FW'] + Ubase['SI']
U_si
[(kk, U_si[kk] - Ubase[kk]) for kk in Ubase]
[(U_si[kk] - Ubase[kk]) for kk in Ubase][-1] * cp * rhow / 1000000.0
U_pw['Q'] * cp * rhow / 1000000.0
def lineplot_PW_salinity():
f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)
xind = -1
yind = -1
svr = len(PWS_Svec)
xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],
[(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]
ufw_tot = -Ubase['SI'] - Ubase['FW']
yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + ufw_tot,
pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(
[(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in
range(svr)])]
yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -
Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in
range(svr)])]
xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']
titvec = ['a) Vary PWN salinity\n\nPWS = 34.4',
'b) Vary PWS salinity\n\nPWN = 33.7', 'c) Vary both PW salinities']
lw = 2
for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:
axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=
coldic[kk], label=kk, linewidth=lw)
axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=
coldic[kk], label=kk, linewidth=lw)
axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for
ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)
for ii in range(3):
ax1 = axx[ii].twinx()
for ll in ['']:
ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)
ax2 = axx[ii].twinx()
ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=
'limegreen', linewidth=lw)
axx[ii].set_xlabel(xlab[ii])
ax1.set_ylim(-10, 10)
ax2.set_ylim(-40, 40)
axx[ii].set_title(titvec[ii], fontweight='bold')
if ii != 2:
ax1.set_yticklabels('')
ax2.set_yticklabels('')
axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])
axx[0].set_ylim(-1.5, 1.5)
axx[0].set_yticks(arange(-1, 1.1, 0.5))
ax2.spines['right'].set_position(('axes', 1.3))
axx[0].set_ylabel('Transport anomaly [Sv]')
ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')
ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')
ax1.tick_params(axis='y', colors='c')
ax2.tick_params(axis='y', colors='limegreen')
leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)
for line in leg.get_lines():
line.set_linewidth(4.0)
axi2 = axx[2].twiny()
axi2.set_xticks(arange(32.8, 33.8, 0.2))
axi2.set_xlim(xvar[0][0], xvar[0][-1])
axi2.set_xlabel('PWN salinity')
axx[2].axvline(34.4 - 0.5, color='k', zorder=0)
savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')
savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')
lineplot_PW_salinity()
37 / (56 + 37 + 5)
Ubase['FW']
Ubase['SI']
<|reserved_special_token_0|>
for U_FW in arange(0, 0.11, 0.01):
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'], S[
'PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[
'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])
xinit = xbase.copy()
xinit[5] = xinit[5] + U_FW
dv = -AM.dot(xinit)
Evec = xinit / 5
Evec[5:7] = 1e-10
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
fwvar[U_FW] = xinit + xsol_Ad
<|reserved_special_token_0|>
U['FW'] + U['SI']
Ubase['FW'] + Ubase['SI'] + 0.05
U_fwvar['FW'] + U_fwvar['SI']
U_fwvar['Q'] * cp * rhow / 1000000.0
U_fwvar
<|reserved_special_token_0|>
for ii, ee in enumerate(1 - epsilon):
a_pwmat[ii, :, :] = (ee * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat) / S[
'AWS'] - 1) + pwmats['PWS'].T * ((S['PWS'] + PWS_Smat) / S['AWS'] - 1)
) / (pwmats['FW'].T + pwmats['SI'].T)
b_pwmat[ii, :, :] = ((1 - ee) * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat
) / S['AWS'] - 1) + pwmats['DWS'].T * (S['DWS'] / S['AWS'] - 1)) / (
pwmats['FW'].T + pwmats['SI'].T)
<|reserved_special_token_0|>
PWN_Smat[10, 10]
PWS_Smat[10, 10]
PWN_Smat[5, 5]
PWS_Smat[5, 5]
<|reserved_special_token_0|>
def plot_adep_pw():
f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)
f.subplots_adjust(wspace=0.3)
for ii, var in enumerate([a_pwmat, b_pwmat]):
if ii == 0:
xvar = 1 - epsilon
xvar2 = 1
xvar3 = 0
else:
xvar = epsilon
xvar2 = 0
xvar3 = 1
axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, linewidth=3, color='k', label=
'Base case', zorder=5)
axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[
'SI']) * 1000.0, color='purple', zorder=4, label=
'Polar Waters fresher by 0.5', linewidth=3)
axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)
axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw
['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')
axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)
axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +
U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')
axx[ii].set_ylim(-30, 140)
axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +
U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=
'Add 20 mSv of Fresh Water')
axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[
'SI']) * 1000.0, linewidth=3, color=fwcol)
axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, 'o', color=fwcol, label='')
axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',
color=fwcol, label='')
axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,
color=fwcol, label='')
axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, ash, color=fwcol, label='')
axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)
axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)
axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)
axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)
axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)
axx[0].set_title('a) Estuarine limb', fontsize=14)
axx[1].set_title('b) Overturning limb', fontsize=14)
axx[0].set_ylabel(
'$\\mathbf{\\delta}\\ U_{FW}$\nFW transport in $\\mathbf{PWS}$ [mSv]')
axx[1].set_ylabel(
'$\\mathbf{\\gamma}\\ U_{FW}$\nFW transport in $\\mathbf{DWS}$ [mSv]')
axx[0].set_xlabel(
'$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\nPWN transport in $\\mathbf{PWS}$ [Sv]'
)
axx[1].set_xlabel(
'$\\mathbf{\\epsilon} \\ U_{PWN}$\nPWN transport in $\\mathbf{DWS}$ [Sv]'
)
for axi in (axx[0], axx[1]):
axi.axhline(0, color='k')
axi.set_xlim(-0.05, 2.2)
axx[0].axhline(56, color='k', linestyle='--')
axx[1].axhline(37, color='k', linestyle='--')
savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')
savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')
plot_adep_pw()
def get_PWN_from_FW(x2, y1, y2, y3):
x3 = (y3 - y1) * x2 / (y2 - y1)
return x3
<|reserved_special_token_0|>
x3_base_PWS
Ubase['PWN']
1 - x3_base_PWS / Ubase['PWN']
<|reserved_special_token_0|>
x3_fresh_PWS
U_pw['PWN']
def get_AWS_from_PWN(Uvar, Svar, eps):
alpha_U = -(Uvar['PWS'] * Svar['PWS'] + (1 - eps) * Uvar['PWN'] * Svar[
'PWN']) / Svar['AWS']
beta_U = -(Uvar['DWS'] * Svar['DWS'] + eps * Uvar['PWN'] * Svar['PWN']
) / Svar['AWS']
return alpha_U, beta_U
get_AWS_from_PWN(Ubase, S, 0.65)
get_AWS_from_PWN(U_pw, S_PW, 0.65)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
figdir = (
'/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'
)
figdir_paper = (
'/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'
)
WM = xr.open_dataset(datadir + 'FW_WM/OSNAP2014-18_WM_2008.nc')
WM_mb = xr.open_dataset(datadir + 'FW_WM/OSNAP2014-18_WM_mb_2008.nc')
cp = 3850
rhow = 1025
tera = 10 ** 12
Q = -251 * tera / rhow / cp / 1000000.0
def get_U_S_T_from_WM(WM):
U = {}
S = {}
T = {}
for wm in WM.WM:
U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(
'TIME.month').mean('TIME').mean(dim='month').values)
U['SI'] = 0.073
U['FW'] = 0.028
U['Q'] = Q
S['SI'] = 0
S['FW'] = 0
T['SI'] = 0
T['FW'] = 0
T['Q'] = 1
return U, S, T
U, S, T = get_U_S_T_from_WM(WM)
U_mb, S_mb, T_mb = get_U_S_T_from_WM(WM_mb)
def get_U_from_x(x):
U = {}
U['PWS'] = x[0]
U['AWS'] = x[1]
U['DWS'] = x[2]
U['PWN'] = x[3]
U['AWN'] = x[4]
U['FW'] = x[5]
U['SI'] = x[6]
U['Q'] = x[7]
return U
AM = {}
x0 = {}
AM['base'] = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],
S['PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T['DWS'],
T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])
x0['base'] = [U['PWS'], U['AWS'], U['DWS'], U['PWN'], U['AWN'], U['FW'], U[
'SI'], U['Q']]
AM['massbal'] = array([[1, 1, 1, 0, 0, 0.5, 0.5, 0], [0, 0, 0, 1, 1, 0.5,
0.5, 0], [S_mb['PWS'], S_mb['AWS'], S_mb['DWS'], S_mb['PWN'], S_mb[
'AWN'], S_mb['FW'], S_mb['SI'], 0], [T_mb['PWS'], T_mb['AWS'], T_mb[
'DWS'], T_mb['PWN'], T_mb['AWN'], T_mb['FW'], T_mb['SI'], 1]])
x0['massbal'] = [U_mb['PWS'], U_mb['AWS'], U_mb['DWS'], U_mb['PWN'], U_mb[
'AWN'], U_mb['FW'], U_mb['SI'], U_mb['Q']]
zz = 'base'
AM[zz].dot(x0[zz])
16 / 35
1.5 / 10
Snorm = 35
Tnorm = 5
def run_inverse_model(zz, U, S, T):
dv = -AM[zz].dot(x0[zz])
if zz == 'base':
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
elif zz == 'massbal':
Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])
Evec = array([(xx / 5) for xx in x0[zz]])
E = diag(Evec)
Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))
Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
xbase = x0[zz] + xsol_Ad
P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +
linalg.inv(Winv)).dot(AM[zz].dot(E)))))
Ubase = get_U_from_x(xbase)
Ue = get_U_from_x(P)
return Ubase, Ue, xbase
Ubase, Ue, xbase = run_inverse_model('base', U, S, T)
Umb_sol, Umb_err, xmb = run_inverse_model('massbal', U_mb, S_mb, T_mb)
coldic = {'AWS': 'red', 'DWS': 'grey', 'PWS': 'royalblue', 'PWN': 'purple',
'AWN': 'orange', 'SI': 'cyan', 'FW': 'cyan', 'Q': 'limegreen'}
def plot_base_case_simple(Ubase, Ue, plt):
f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,
gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))
alf = 0.75
capi = 7
axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[
coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',
'DWS']], capsize=capi, alpha=alf)
axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')
ylimi = 20
axx[0].set_ylim(-ylimi, ylimi)
ylimi = 4
axx[1].set_ylim(-ylimi, ylimi)
axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color
=[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in
['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)
axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',
color='k')
axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue
['SI'] + Ue['FW'], capsize=capi, alpha=alf)
axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')
fwlim = 0.2
axx[2].set_ylim(-fwlim, fwlim)
fsz = 14
axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)
axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)
axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],
yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)
axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')
for ii in range(3):
axx[ii].axhline(0, color='k')
axx[0].set_xticks(range(2))
axx[0].set_xticklabels(['AWS', 'DWS'])
axx[1].set_xticks(range(3))
axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])
axx[2].set_xticks(range(1))
axx[2].set_xticklabels(['FW'])
axx[3].set_xticks([0])
axx[3].set_xticklabels('Q')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',
bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',
bbox_inches='tight')
plot_base_case_simple(Ubase, Ue, 'base')
U
Ubase['SI'] + Ubase['FW']
Ubase['Q'] * cp * rhow / 1000000.0
basediff = [(kk, Ubase[kk] - U[kk]) for kk in Ubase]
basediff
plot_base_case_simple(Umb_sol, Umb_err, 'mb')
[(kk, Umb_sol[kk] - U_mb[kk]) for kk in Ubase]
epsilon = arange(0, 1.1, 0.1)
def get_a_b_fracs(Ubase, S):
a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[
'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *
(S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])
return a, b
S['PWN'] / S['AWS']
S['PWS'] / S['AWS']
S['DWS'] / S['AWS']
Ubase['PWS']
Ubase['DWS']
Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1)
Ubase['PWS'] * (S['PWS'] / S['AWS'] - 1)
Ubase['DWS'] * (S['DWS'] / S['AWS'] - 1)
Ubase['FW'] + Ubase['SI']
a = {}
b = {}
a['base'], b['base'] = get_a_b_fracs(Ubase, S)
a['mb'], b['mb'] = get_a_b_fracs(Umb_sol, S_mb)
[(kk, S[kk] - S_mb[kk]) for kk in S]
def plot_adep():
for ii, kk in enumerate(a):
plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))
xlabel('$\\mathbf{1-\\epsilon}$\nfraction of PWN in PWS')
ylabel('$\\mathbf{a}$\n fraction of (FW + SI) in PWS')
xlim(0, 1)
axhline(0, color='k')
legend()
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')
savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')
plot_adep()
sivar = {}
for S_SI in range(0, 10, 2):
sivar[S_SI] = {}
for T_SI in range(-90, 5, 10):
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],
S['PWN'], S['AWN'], S['FW'], S_SI, 0], [T['PWS'], T['AWS'], T[
'DWS'], T['PWN'], T['AWN'], T['FW'], T_SI, 1]])
dv = -AM.dot(xbase)
Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
sivar[S_SI][T_SI] = xbase + xsol_Ad
def get_mats_from_dic(sivar):
Svec = array([float(ff) for ff in sivar])
Tvec = array([float(ff) for ff in sivar[Svec[0]]])
simats = {}
for QQ, kk in enumerate(Ubase):
simats[kk] = zeros((len(Svec), len(Tvec)))
for ii, ss in enumerate(Svec):
for jj, tt in enumerate(Tvec):
simats[kk][ii, jj] = sivar[ss][tt][QQ]
return Svec, Tvec, simats
Svec, Tvec, simats = get_mats_from_dic(sivar)
def plot_SIresponse():
f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)
axivec = array([])
for axirow in axx:
for axi in axirow:
axivec = hstack((axivec, axi))
for axi, kk in zip(axivec, simats):
if (kk == 'FW') | (kk == 'SI'):
climi = 10
contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) *
1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)
axi.set_title(kk + ' [mSv]')
cbar = colorbar(contit, ax=axi, format='%1.0f')
elif kk == 'Q':
climi = 30
contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -
Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.
PiYG_r)
axi.set_title(kk + ' [TW]')
cbar = colorbar(contit, ax=axi, format='%2.0f')
else:
climi = 0.3
contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],
vmin=-climi, vmax=climi, cmap=cm.PuOr_r)
axi.set_title(kk + ' [Sv]')
cbar = colorbar(contit, ax=axi, format='%0.2f')
for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
label.set_visible(False)
f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)
f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va=
'center', rotation='vertical', fontsize=14)
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=
'tight')
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=
'tight')
plot_SIresponse()
contourf(simats['AWN'].T - Ubase['AWN'] + simats['PWN'].T - Ubase['PWN'])
colorbar()
pwsvar = {}
for S_PWNa in arange(-1, 0.05, 0.1):
pwsvar[S_PWNa] = {}
for S_PWSa in arange(-1.0, 0.05, 0.1):
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] + S_PWSa, S['AWS'],
S['DWS'], S['PWN'] + S_PWNa, S['AWN'], S['FW'], S['SI'], 0], [T
['PWS'], T['AWS'], T['DWS'], T['PWN'], T['AWN'], T['FW'], T[
'SI'], 1]])
dv = -AM.dot(xbase)
Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
pwsvar[S_PWNa][S_PWSa] = xbase + xsol_Ad
PWN_Svec, PWS_Svec, pwmats = get_mats_from_dic(pwsvar)
PWN_Smat, PWS_Smat = meshgrid(PWN_Svec, PWS_Svec)
U_si = get_U_from_x(sivar[0][-30])
U_pw = get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001])
(U_pw['FW'] + U_pw['SI'] - (Ubase['FW'] + Ubase['SI'])) * 1000.0
U_pw['FW'] + U_pw['SI']
Ubase['FW'] + Ubase['SI']
U_si
[(kk, U_si[kk] - Ubase[kk]) for kk in Ubase]
[(U_si[kk] - Ubase[kk]) for kk in Ubase][-1] * cp * rhow / 1000000.0
U_pw['Q'] * cp * rhow / 1000000.0
def lineplot_PW_salinity():
f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)
xind = -1
yind = -1
svr = len(PWS_Svec)
xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],
[(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]
ufw_tot = -Ubase['SI'] - Ubase['FW']
yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + ufw_tot,
pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(
[(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in
range(svr)])]
yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -
Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in
range(svr)])]
xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']
titvec = ['a) Vary PWN salinity\n\nPWS = 34.4',
'b) Vary PWS salinity\n\nPWN = 33.7', 'c) Vary both PW salinities']
lw = 2
for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:
axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=
coldic[kk], label=kk, linewidth=lw)
axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=
coldic[kk], label=kk, linewidth=lw)
axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for
ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)
for ii in range(3):
ax1 = axx[ii].twinx()
for ll in ['']:
ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)
ax2 = axx[ii].twinx()
ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=
'limegreen', linewidth=lw)
axx[ii].set_xlabel(xlab[ii])
ax1.set_ylim(-10, 10)
ax2.set_ylim(-40, 40)
axx[ii].set_title(titvec[ii], fontweight='bold')
if ii != 2:
ax1.set_yticklabels('')
ax2.set_yticklabels('')
axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])
axx[0].set_ylim(-1.5, 1.5)
axx[0].set_yticks(arange(-1, 1.1, 0.5))
ax2.spines['right'].set_position(('axes', 1.3))
axx[0].set_ylabel('Transport anomaly [Sv]')
ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')
ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')
ax1.tick_params(axis='y', colors='c')
ax2.tick_params(axis='y', colors='limegreen')
leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)
for line in leg.get_lines():
line.set_linewidth(4.0)
axi2 = axx[2].twiny()
axi2.set_xticks(arange(32.8, 33.8, 0.2))
axi2.set_xlim(xvar[0][0], xvar[0][-1])
axi2.set_xlabel('PWN salinity')
axx[2].axvline(34.4 - 0.5, color='k', zorder=0)
savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')
savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')
lineplot_PW_salinity()
37 / (56 + 37 + 5)
Ubase['FW']
Ubase['SI']
fwvar = {}
for U_FW in arange(0, 0.11, 0.01):
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'], S[
'PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[
'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])
xinit = xbase.copy()
xinit[5] = xinit[5] + U_FW
dv = -AM.dot(xinit)
Evec = xinit / 5
Evec[5:7] = 1e-10
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
fwvar[U_FW] = xinit + xsol_Ad
U_fwvar = get_U_from_x(fwvar[0.02])
a_fw, b_fw = get_a_b_fracs(U_fwvar, S)
U['FW'] + U['SI']
Ubase['FW'] + Ubase['SI'] + 0.05
U_fwvar['FW'] + U_fwvar['SI']
U_fwvar['Q'] * cp * rhow / 1000000.0
U_fwvar
AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] - 0.5, S['AWS'], S['DWS'],
S['PWN'] - 0.5, S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[
'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])
xinit = xbase.copy()
xinit[5] = xinit[5] + 0.02
dv = -AM.dot(xinit)
Evec = xinit / 5
Evec[5:7] = 1e-10
E = diag(Evec)
Winv = diag([1, 1 / Snorm, 1 / Tnorm])
Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)
xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad = E.dot(xsol_prime)
x_both = xinit + xsol_Ad
U_both = get_U_from_x(x_both)
S_PW = S.copy()
S_PW['PWS'] = S['PWS'] - 0.5
S_PW['PWN'] = S['PWN'] - 0.5
a_both, b_both = get_a_b_fracs(U_both, S_PW)
a_pwmat = zeros((len(epsilon), shape(pwmats['Q'])[1], shape(pwmats['Q'])[0]))
b_pwmat = a_pwmat.copy()
for ii, ee in enumerate(1 - epsilon):
a_pwmat[ii, :, :] = (ee * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat) / S[
'AWS'] - 1) + pwmats['PWS'].T * ((S['PWS'] + PWS_Smat) / S['AWS'] - 1)
) / (pwmats['FW'].T + pwmats['SI'].T)
b_pwmat[ii, :, :] = ((1 - ee) * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat
) / S['AWS'] - 1) + pwmats['DWS'].T * (S['DWS'] / S['AWS'] - 1)) / (
pwmats['FW'].T + pwmats['SI'].T)
c_pwmat = 1 - a_pwmat - b_pwmat
PWN_Smat[10, 10]
PWS_Smat[10, 10]
PWN_Smat[5, 5]
PWS_Smat[5, 5]
epsilon = arange(0, 1.1, 0.1)
fwcol = '#43a2ca'
ash = 'd'
def plot_adep_pw():
f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)
f.subplots_adjust(wspace=0.3)
for ii, var in enumerate([a_pwmat, b_pwmat]):
if ii == 0:
xvar = 1 - epsilon
xvar2 = 1
xvar3 = 0
else:
xvar = epsilon
xvar2 = 0
xvar3 = 1
axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, linewidth=3, color='k', label=
'Base case', zorder=5)
axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[
'SI']) * 1000.0, color='purple', zorder=4, label=
'Polar Waters fresher by 0.5', linewidth=3)
axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)
axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw
['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')
axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +
Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)
axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +
U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')
axx[ii].set_ylim(-30, 140)
axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +
U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=
'Add 20 mSv of Fresh Water')
axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[
'SI']) * 1000.0, linewidth=3, color=fwcol)
axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, 'o', color=fwcol, label='')
axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',
color=fwcol, label='')
axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,
color=fwcol, label='')
axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *
1000.0, ash, color=fwcol, label='')
axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)
axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)
axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)
axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)
axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)
axx[0].set_title('a) Estuarine limb', fontsize=14)
axx[1].set_title('b) Overturning limb', fontsize=14)
axx[0].set_ylabel(
'$\\mathbf{\\delta}\\ U_{FW}$\nFW transport in $\\mathbf{PWS}$ [mSv]')
axx[1].set_ylabel(
'$\\mathbf{\\gamma}\\ U_{FW}$\nFW transport in $\\mathbf{DWS}$ [mSv]')
axx[0].set_xlabel(
'$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\nPWN transport in $\\mathbf{PWS}$ [Sv]'
)
axx[1].set_xlabel(
'$\\mathbf{\\epsilon} \\ U_{PWN}$\nPWN transport in $\\mathbf{DWS}$ [Sv]'
)
for axi in (axx[0], axx[1]):
axi.axhline(0, color='k')
axi.set_xlim(-0.05, 2.2)
axx[0].axhline(56, color='k', linestyle='--')
axx[1].axhline(37, color='k', linestyle='--')
savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')
savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')
plot_adep_pw()
def get_PWN_from_FW(x2, y1, y2, y3):
x3 = (y3 - y1) * x2 / (y2 - y1)
return x3
x3_base_PWS = get_PWN_from_FW(Ubase['PWN'], (Ubase['FW'] + Ubase['SI']) *
a_pwmat[-1, 10, 10] * 1000.0, (Ubase['FW'] + Ubase['SI']) * a_pwmat[0,
10, 10] * 1000.0, 50)
x3_base_PWS
Ubase['PWN']
1 - x3_base_PWS / Ubase['PWN']
x3_fresh_PWS = get_PWN_from_FW(U_pw['PWN'], (U_pw['FW'] + U_pw['SI']) *
a_pwmat[-1, 5, 5] * 1000.0, (U_pw['FW'] + U_pw['SI']) * a_pwmat[0, 5, 5
] * 1000.0, 50)
x3_fresh_PWS
U_pw['PWN']
def get_AWS_from_PWN(Uvar, Svar, eps):
alpha_U = -(Uvar['PWS'] * Svar['PWS'] + (1 - eps) * Uvar['PWN'] * Svar[
'PWN']) / Svar['AWS']
beta_U = -(Uvar['DWS'] * Svar['DWS'] + eps * Uvar['PWN'] * Svar['PWN']
) / Svar['AWS']
return alpha_U, beta_U
get_AWS_from_PWN(Ubase, S, 0.65)
get_AWS_from_PWN(U_pw, S_PW, 0.65)
<|reserved_special_token_1|>
from firstfuncs_1618 import *
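# NB: firstfuncs_1618 is assumed to provide the working namespace used below
# (pylab/numpy: array, arange, diag, linalg, zeros, hstack, meshgrid, plotting;
# xarray as xr; and the datadir path) -- it is not defined in this script.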
figdir='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'
figdir_paper='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'
########################################################################################################
########################################################################################################
#### Set up the optimization framework, which allows for varying almost all elements within a prescribed range
########################################################################################################
########################################################################################################
WM=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_2008.nc')
WM_mb=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_mb_2008.nc')
cp=3850
rhow=1025
tera=10**12
#NorESM heat flux (taking sea ice into account)
Q=-251*tera/rhow/cp/1e6 # divide by 1e6 so Q carries Sverdrup-equivalent units
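# Extract time-mean volume transport (U), salinity (S) and potential
# temperature (T) for each water mass from an OSNAP water-mass dataset,
# then append the fixed sea-ice, meteoric fresh-water and heat-flux terms.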
def get_U_S_T_from_WM(WM):
U={}
S={}
T={}
for wm in WM.WM:
U[str(wm.values)]=float(WM['TRANS'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
S[str(wm.values)]=float(WM['PSAL'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
T[str(wm.values)]=float(WM['PTMP'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
    U['SI']=0.073 # NorESM sea-ice fresh water input; very similar to the ~70 mSv of Kwok et al. 2004
U['FW']=0.028 # mean E-P from JRA55
U['Q']=Q
S['SI']=0
S['FW']=0
T['SI']=0
T['FW']=0
T['Q']=1
return U,S,T
U,S,T=get_U_S_T_from_WM(WM)
U_mb,S_mb,T_mb=get_U_S_T_from_WM(WM_mb)
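# Map the 8-element solution vector back onto named water-mass transports
# (same ordering as the x0 first-guess vectors built below).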
def get_U_from_x(x):
U={}
U['PWS']=x[0]
U['AWS']=x[1]
U['DWS']=x[2]
U['PWN']=x[3]
U['AWN']=x[4]
U['FW']=x[5]
U['SI']=x[6]
U['Q']=x[7]
return U
AM={}
x0={}
AM['base']=array([[1,1,1,1,1,1,1,0],\
[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
x0['base']=[U['PWS'],U['AWS'],U['DWS'],U['PWN'],U['AWN'],U['FW'],U['SI'],U['Q']]
AM['massbal']=array([[1,1,1,0,0,0.5,0.5,0],\
[0,0,0,1,1,0.5,0.5,0],\
[S_mb['PWS'],S_mb['AWS'],S_mb['DWS'],S_mb['PWN'],S_mb['AWN'],S_mb['FW'],S_mb['SI'],0],\
[T_mb['PWS'],T_mb['AWS'],T_mb['DWS'],T_mb['PWN'],T_mb['AWN'],T_mb['FW'],T_mb['SI'],1]])
x0['massbal']=[U_mb['PWS'],U_mb['AWS'],U_mb['DWS'],U_mb['PWN'],U_mb['AWN'],U_mb['FW'],U_mb['SI'],U_mb['Q']]
zz='base'
AM[zz].dot(x0[zz])
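# scratch: rough anomaly-to-mean ratios (16/35 for salinity, 1.5/10 for
# temperature) that presumably motivate the Snorm/Tnorm normalisations below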
16/35
1.5/10
#vars that I want to be handy for later calcs
Snorm=35
Tnorm=5
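# Solve the underdetermined budget as a weighted, tapered least-squares
# problem via the SVD pseudo-inverse: Winv weights the volume, salt and heat
# equations; E scales each unknown by a ~20% (x0/5) prior; the minimum-norm
# correction xsol_Ad is added to the first guess x0. P is a Gauss-Markov-style
# posterior error estimate whose diagonal is used as the solution uncertainty Ue.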
def run_inverse_model(zz,U,S,T):
dv=-AM[zz].dot(x0[zz])
if zz=='base':
Winv=diag([1,1/Snorm,1/Tnorm])
elif zz=='massbal':
Winv=diag([1,1,1/Snorm,1/Tnorm])
Evec=array([xx/5 for xx in x0[zz]])
# Evec=hstack((5*[1],0.02,0.02,Qvar))
E=diag(Evec)
Umat,D,VmatT=linalg.svd(Winv.dot(AM[zz].dot(E)))
Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
xbase=x0[zz]+xsol_Ad
P=diag(E-E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T))+linalg.inv(Winv)).dot(AM[zz].dot(E)))))
Ubase=get_U_from_x(xbase)
Ue=get_U_from_x(P)
return Ubase,Ue,xbase
Ubase,Ue,xbase=run_inverse_model('base',U,S,T)
Umb_sol,Umb_err,xmb=run_inverse_model('massbal',U_mb,S_mb,T_mb)
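# --- minimal standalone sketch (illustration only, not part of the analysis):
# the same SVD-based tapered least-squares update on a toy one-equation volume
# budget; assumes only numpy, and the *_toy names are hypothetical ---
import numpy as np
A_toy = np.array([[1., 1., 1.]])        # volume conservation: transports sum to 0
x0_toy = np.array([10., -6., -3.8])     # first guess, 0.2 Sv out of balance
Winv_toy = np.diag([1.])                # single equation, unit weight
E_toy = np.diag(np.abs(x0_toy) / 5.)    # ~20% a-priori adjustment per unknown
Um_t, D_t, VT_t = np.linalg.svd(Winv_toy.dot(A_toy).dot(E_toy))
Li_t = np.zeros(A_toy.shape).T
Li_t[:A_toy.shape[0], :A_toy.shape[0]] = np.diag(1. / D_t)
xsol_toy = x0_toy + E_toy.dot(VT_t.T.dot(Li_t.dot(Um_t.T.dot(Winv_toy.dot(-A_toy.dot(x0_toy))))))
print('toy check:', xsol_toy, A_toy.dot(xsol_toy))  # imbalance spread in proportion to priors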
coldic={'AWS':'red','DWS':'grey','PWS':'royalblue','PWN':'purple','AWN':'orange','SI':'cyan','FW':'cyan','Q':'limegreen'}
def plot_base_case_simple(Ubase,Ue,plt):
f,axx=subplots(1,4,figsize=(9,2.5),constrained_layout=True,gridspec_kw=dict(width_ratios=[2,3,1,1]))
alf=0.75
capi=7
#U
axx[0].bar(range(2),[Ubase[kk] for kk in ['AWS','DWS']],color=[coldic[kk] for kk in ['AWS','DWS']],yerr=[Ue[kk] for kk in ['AWS','DWS']],capsize=capi,alpha=alf)
axx[0].plot(range(2),[U[kk] for kk in ['AWS','DWS']],'o',color='k')
ylimi=20
axx[0].set_ylim(-ylimi,ylimi)
ylimi=4
axx[1].set_ylim(-ylimi,ylimi)
axx[1].bar(range(3),[Ubase[kk] for kk in ['PWS','PWN','AWN']],color=[coldic[kk] for kk in ['PWS','PWN','AWN']],yerr=[Ue[kk] for kk in ['PWS','PWN','AWN']],capsize=capi,alpha=alf)
axx[1].plot(range(3),[U[kk] for kk in ['PWS','PWN','AWN']],'o',color='k')
    axx[2].bar(range(1),Ubase['SI']+Ubase['FW'],color=coldic['FW'],yerr=Ue['SI']+Ue['FW'],capsize=capi,alpha=alf) # solved FW+SI with its error estimate
axx[2].plot(range(1),U['SI']+U['FW'],'o',color='k')
fwlim=0.2
axx[2].set_ylim(-fwlim,fwlim)
fsz=14
axx[0].set_ylabel('Volume transport [Sv]',fontsize=fsz)
axx[3].set_ylabel('Heat flux [TW]',fontsize=fsz)
axx[3].bar(0,cp*rhow*(Ubase['Q'])/1e6,color=coldic['Q'],yerr=cp*rhow*Ue['Q']/1e6,capsize=capi,alpha=alf)
axx[3].plot(0,cp*rhow*(U['Q'])/1e6,'o',color='k')
for ii in range(3):
axx[ii].axhline(0,color='k')
axx[0].set_xticks(range(2))
axx[0].set_xticklabels(['AWS','DWS'])
axx[1].set_xticks(range(3))
axx[1].set_xticklabels(['PWS','PWN','AWN'])
axx[2].set_xticks(range(1))
axx[2].set_xticklabels(['FW'])
axx[3].set_xticks([0])
    axx[3].set_xticklabels(['Q'])
savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.png',bbox_inches='tight')
savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.pdf',bbox_inches='tight')
plot_base_case_simple(Ubase,Ue,'base')
U
Ubase['SI']+Ubase['FW']
Ubase['Q']*cp*rhow/1e6
basediff=[(kk,Ubase[kk]-U[kk]) for kk in Ubase]
basediff
plot_base_case_simple(Umb_sol,Umb_err,'mb')
[(kk,Umb_sol[kk]-U_mb[kk]) for kk in Ubase]
##################################################################################
# Calculate fraction of fresh water vs. other water masses that goes into each limb
#################################################################################
#fraction of PWN in DWS limb
epsilon=arange(0,1.1,0.1)
def get_a_b_fracs(Ubase,S):
#fraction of FW in PWS, as a function of epsilon
a=((1-epsilon)*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['PWS']*(S['PWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])
#fraction of FW in DWS, as a function of epsilon
b=(epsilon*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['DWS']*(S['DWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])
return a,b
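# a and b partition the total (FW + SI) input between the estuarine (PWS)
# and overturning (DWS) limbs as a function of epsilon; the remainder
# 1 - a - b is computed later as c_pwmat.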
S['PWN']/S['AWS']
S['PWS']/S['AWS']
S['DWS']/S['AWS']
Ubase['PWS']
Ubase['DWS']
Ubase['PWN']*(S['PWN']/S['AWS']-1)
Ubase['PWS']*(S['PWS']/S['AWS']-1)
Ubase['DWS']*(S['DWS']/S['AWS']-1)
(Ubase['FW']+Ubase['SI'])
a={}
b={}
a['base'],b['base']=get_a_b_fracs(Ubase,S)
a['mb'],b['mb']=get_a_b_fracs(Umb_sol,S_mb)
[(kk,S[kk]-S_mb[kk]) for kk in S]
def plot_adep():
for ii,kk in enumerate(a):
plot(1-epsilon,a[kk],linewidth=3,label=kk,color='C'+str(ii))
    xlabel('$\\mathbf{1-\\epsilon}$\nfraction of PWN in PWS')
    ylabel('$\\mathbf{a}$\n fraction of (FW + SI) in PWS')
xlim(0,1)
axhline(0,color='k')
legend()
savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.png',bbox_inches='tight')
savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.pdf',bbox_inches='tight')
plot_adep()
#################################################################################
##### Look into how much Sea ice properties matter
#################################################################################
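# Re-solve the inversion over a grid of sea-ice salinities and effective
# temperatures, using the base solution xbase as the first guess, with
# O(1 Sv) priors on the five main transports and ~20% priors on FW, SI and Q.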
sivar={}
for S_SI in range(0,10,2):
sivar[S_SI]={}
for T_SI in range(-90,5,10):
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S_SI,0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T_SI,1]])
dv=-AM.dot(xbase)
Evec=array(hstack(([1]*5,xbase[-3:]/5)))
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
sivar[S_SI][T_SI]=xbase+xsol_Ad
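# Reshape the nested {S: {T: solution-vector}} dictionary into one
# (len(Svec) x len(Tvec)) matrix per water mass for contouring.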
def get_mats_from_dic(sivar):
Svec=array([float(ff) for ff in sivar])
Tvec=array([float(ff) for ff in sivar[Svec[0]]])
simats={}
for QQ,kk in enumerate(Ubase):
simats[kk]=zeros((len(Svec),len(Tvec)))
for ii,ss in enumerate(Svec):
for jj,tt in enumerate(Tvec):
simats[kk][ii,jj]=sivar[ss][tt][QQ]
return Svec,Tvec,simats
Svec,Tvec,simats=get_mats_from_dic(sivar)
def plot_SIresponse():
f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)
axivec=array([])
for axirow in axx:
for axi in axirow:
axivec=hstack((axivec,axi))
for axi,kk in zip(axivec,simats):
if (kk=='FW') | (kk=='SI'):
climi=10
contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)
axi.set_title(kk+' [mSv]')
cbar=colorbar(contit,ax=axi,format='%1.0f')
elif kk=='Q':
climi=30
contit=axi.contourf(Svec,Tvec,cp*rhow*(simats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)
axi.set_title(kk+' [TW]')
cbar=colorbar(contit,ax=axi,format='%2.0f')
else:
climi=0.3
contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)
axi.set_title(kk+' [Sv]')
cbar=colorbar(contit,ax=axi,format='%0.2f')
for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
label.set_visible(False)
f.text(0.5, 0, 'sea ice salinity', ha='center',fontsize=14)
    f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va='center',rotation='vertical',fontsize=14)
savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.png',bbox_inches='tight')
savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.pdf',bbox_inches='tight')
plot_SIresponse()
contourf(simats['AWN'].T-Ubase['AWN']+simats['PWN'].T-Ubase['PWN'])
colorbar()
#################################################################################
##### Test dependence on PW salinity (both north and south)
#################################################################################
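# Same perturbation machinery, but now freshening the PWS and PWN salinities
# by up to 1 (in 0.1 steps) relative to the base case.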
pwsvar={}
for S_PWNa in arange(-1,0.05,0.1):
pwsvar[S_PWNa]={}
for S_PWSa in arange(-1.0,0.05,0.1):
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS']+S_PWSa,S['AWS'],S['DWS'],S['PWN']+S_PWNa,S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
dv=-AM.dot(xbase)
Evec=array(hstack(([1]*5,xbase[-3:]/5)))
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
pwsvar[S_PWNa][S_PWSa]=xbase+xsol_Ad
PWN_Svec,PWS_Svec,pwmats=get_mats_from_dic(pwsvar)
####################################################################################################
######## Response is pretty uniform: try to tease out a pattern (and look at other deps?) #######
##################################################################################################
PWN_Smat,PWS_Smat=meshgrid(PWN_Svec,PWS_Svec)
U_si=get_U_from_x(sivar[0][-30])
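# dictionary keys are the raw floats from arange, hence the
# -0.5000000000000001 lookups (index 10 is ~no perturbation, index 5 is -0.5)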
U_pw=get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001])
(U_pw['FW']+U_pw['SI']-(Ubase['FW']+Ubase['SI']))*1e3
U_pw['FW']+U_pw['SI']
Ubase['FW']+Ubase['SI']
U_si
[(kk,U_si[kk]-Ubase[kk]) for kk in Ubase]
[U_si[kk]-Ubase[kk] for kk in Ubase][-1]*cp*rhow/1e6
U_pw['Q']*cp*rhow/1e6
def lineplot_PW_salinity():
f,axx=subplots(1,3,figsize=(11,3),sharey=True)
xind=-1
yind=-1
svr=len(PWS_Svec)
xvar=[(S['PWN']+PWN_Smat)[xind,:],(S['PWS']+PWS_Smat)[:,yind],[(S['PWS']+PWS_Smat)[ii,ii] for ii in range(svr)]]
ufw_tot=-Ubase['SI']-Ubase['FW']
yvar_fw=[pwmats['FW'].T[xind,:]+pwmats['SI'].T[xind,:]+ufw_tot,pwmats['FW'].T[:,yind]+pwmats['SI'].T[:,yind]+ufw_tot,array([pwmats['FW'].T[ii,ii]+pwmats['SI'].T[ii,ii]+ufw_tot for ii in range(svr)])]
yvar_Q=[pwmats['Q'].T[xind,:]-Ubase['Q'],pwmats['Q'].T[:,yind]-Ubase['Q'],array([pwmats['Q'].T[ii,ii]-Ubase['Q'] for ii in range(svr)])]
xlab=['PWN salinity','PWS salinity','PWS salinity']
titvec=['a) Vary PWN salinity\n\nPWS = 34.4','b) Vary PWS salinity\n\nPWN = 33.7','c) Vary both PW salinities']
lw=2
for kk in ['AWS','PWS','DWS','AWN','PWN']:
axx[0].plot(xvar[0],(pwmats[kk].T[xind,:]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)
axx[1].plot(xvar[1],(pwmats[kk].T[:,yind]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)
axx[2].plot(xvar[2],array([(pwmats[kk].T[ii,ii]-Ubase[kk])for ii in range(svr)]),color=coldic[kk],label=kk,linewidth=lw)
for ii in range(3):
ax1=axx[ii].twinx()
        ax1.plot(xvar[ii],(yvar_fw[ii])*1e3,color='c',linewidth=lw)
ax2=axx[ii].twinx()
ax2.plot(xvar[ii],cp*rhow*(yvar_Q[ii])/1e6,color='limegreen',linewidth=lw)
axx[ii].set_xlabel(xlab[ii])
ax1.set_ylim(-10,10)
ax2.set_ylim(-40,40)
axx[ii].set_title(titvec[ii],fontweight='bold')
if ii!=2:
ax1.set_yticklabels('')
ax2.set_yticklabels('')
axx[ii].set_xlim(xvar[ii][0],xvar[ii][-1])
axx[0].set_ylim(-1.5,1.5)
axx[0].set_yticks(arange(-1,1.1,0.5))
ax2.spines["right"].set_position(("axes", 1.3))
axx[0].set_ylabel('Transport anomaly [Sv]')
ax1.set_ylabel('Fresh water flux anomaly [mSv]',color='c')
ax2.set_ylabel('Heat flux anomaly [TW]',color='limegreen')
ax1.tick_params(axis='y', colors='c')
ax2.tick_params(axis='y', colors='limegreen')
leg=axx[0].legend(loc=(0.5,-0.5),ncol=5,fontsize=13)
for line in leg.get_lines():
line.set_linewidth(4.0)
axi2=axx[2].twiny()
axi2.set_xticks(arange(32.8,33.8,0.2))
axi2.set_xlim(xvar[0][0],xvar[0][-1])
axi2.set_xlabel('PWN salinity')
axx[2].axvline(34.4-0.5,color='k',zorder=0)
# axx[0].set_title('a) Vary PWN salinities\n\n',fontweight='bold')
# axx[1].set_title('b) Vary PWS salinities\n\n',fontweight='bold')
# axx[2].set_title('c) Vary both PW salinities',fontweight='bold')
savefig(figdir_paper+'/PWS_dep.png',bbox_inches='tight')
savefig(figdir_paper+'/PWS_dep.pdf',bbox_inches='tight')
lineplot_PW_salinity()
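# scratch: DWS share of the total FW export, presumably 37 of (56+37+5) mSv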
37/(56+37+5)
#######################################################################################
############## What happens if we add more FW? (Like 100mSv) ###########################
#######################################################################################
Ubase['FW']
Ubase['SI']
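# Re-solve with extra meteoric fresh water added to the first guess; the
# tiny Evec entries (1e-10) effectively pin FW and SI at their prescribed
# values, so the rest of the budget must absorb the perturbation.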
fwvar={}
for U_FW in arange(0,0.11,0.01):
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
xinit=xbase.copy()
xinit[5]=xinit[5]+U_FW
dv=-AM.dot(xinit)
Evec=xinit/5
Evec[5:7]=1e-10
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
fwvar[U_FW]=xinit+xsol_Ad
U_fwvar=get_U_from_x(fwvar[0.02])
a_fw,b_fw=get_a_b_fracs(U_fwvar,S)
U['FW']+U['SI']
Ubase['FW']+Ubase['SI']+0.05
U_fwvar['FW']+U_fwvar['SI']
U_fwvar['Q']*cp*rhow/1e6
U_fwvar
#######################################################################################
############## What happens if we add more FW and make PWS fresher? ###########################
#######################################################################################
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS']-0.5,S['AWS'],S['DWS'],S['PWN']-0.5,S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
xinit=xbase.copy()
xinit[5]=xinit[5]+0.02
dv=-AM.dot(xinit)
Evec=xinit/5
Evec[5:7]=1e-10
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
x_both=xinit+xsol_Ad
U_both=get_U_from_x(x_both)
S_PW=S.copy()
S_PW['PWS']=S['PWS']-0.5
S_PW['PWN']=S['PWN']-0.5
a_both,b_both=get_a_b_fracs(U_both,S_PW)
#######################################################################################
############## Now look at consequences for FW dist ###########################
#######################################################################################
a_pwmat=zeros((len(epsilon),shape(pwmats['Q'])[1],shape(pwmats['Q'])[0]))
b_pwmat=a_pwmat.copy()
for ii,ee in enumerate(1-epsilon):
a_pwmat[ii,:,:]=(ee*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['PWS'].T*((S['PWS']+PWS_Smat)/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)
b_pwmat[ii,:,:]=((1-ee)*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['DWS'].T*(S['DWS']/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)
c_pwmat=1-a_pwmat-b_pwmat
PWN_Smat[10,10]
PWS_Smat[10,10]
PWN_Smat[5,5]
PWS_Smat[5,5]
epsilon=arange(0,1.1,0.1)
fwcol='#43a2ca'
ash='d'
def plot_adep_pw():
f,axx=subplots(1,2,figsize=(11,3.2),sharex=True)
f.subplots_adjust(wspace=0.3)
for ii,var in enumerate([a_pwmat,b_pwmat]):
if ii==0:
xvar=(1-epsilon)
xvar2=1
xvar3=0
else:
xvar=epsilon
xvar2=0
xvar3=1
axx[ii].plot(xvar*Ubase['PWN'],var[:,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,linewidth=3,color='k',label='Base case',zorder=5)
axx[ii].plot(xvar*U_pw['PWN'],var[:,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,color='purple',zorder=4,label='Polar Waters fresher by 0.5',linewidth=3)
axx[ii].plot(xvar2*Ubase['PWN'],var[0,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,'o',color='k',label='',zorder=5)
axx[ii].plot(xvar2*U_pw['PWN'],var[0,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,'o',color='purple',zorder=4,label='')
axx[ii].plot(xvar3*Ubase['PWN'],var[-1,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,ash,color='k',label='',zorder=5)
axx[ii].plot(xvar3*U_pw['PWN'],var[-1,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,ash,color='purple',zorder=4,label='')
axx[ii].set_ylim(-30,140)
axx[0].plot((1-epsilon)*U_fwvar['PWN'],a_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol,label='Add 20 mSv of Fresh Water')
axx[1].plot(epsilon*U_fwvar['PWN'],b_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol)
axx[0].plot(U_fwvar['PWN'],a_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')
axx[1].plot(0,b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')
axx[0].plot(0,a_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')
axx[1].plot(U_fwvar['PWN'],b_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')
axx[0].plot(0.5,56,'*',color='k',label='',markersize=10)
axx[0].plot(1.1,56,'*',color='purple',label='',markersize=10)
axx[1].plot(1.3,37,'*',color='k',label='',markersize=10)
axx[1].plot(1,37,'*',color='purple',label='',markersize=10)
# axx[1].plot(U_fwvar['PWN'],b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'s',color='k',label='')
# axx[0].plot(1-epsilon,a_both,linewidth=3,color='g',label='Both')
# axx[1].plot(1-epsilon,b_both,linewidth=3,color='g')
axx[0].legend(loc=(0.05,-0.5),ncol=3,fontsize=12)
axx[0].set_title('a) Estuarine limb',fontsize=14)
axx[1].set_title('b) Overturning limb',fontsize=14)
    axx[0].set_ylabel('$\\mathbf{\\delta}\\ U_{FW}$\nFW transport in $\\mathbf{PWS}$ [mSv]')
    axx[1].set_ylabel('$\\mathbf{\\gamma}\\ U_{FW}$\nFW transport in $\\mathbf{DWS}$ [mSv]')
    axx[0].set_xlabel('$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\nPWN transport in $\\mathbf{PWS}$ [Sv]')
    axx[1].set_xlabel('$\\mathbf{\\epsilon} \\ U_{PWN}$\nPWN transport in $\\mathbf{DWS}$ [Sv]')
for axi in axx[0],axx[1]:
axi.axhline(0,color='k')
axi.set_xlim(-0.05,2.2)
axx[0].axhline(56,color='k',linestyle='--')
axx[1].axhline(37,color='k',linestyle='--')
savefig(figdir_paper+'/FWfrac_obs_pwdep.png',bbox_inches='tight')
savefig(figdir_paper+'/FWfrac_obs_pwdep.pdf',bbox_inches='tight')
plot_adep_pw()
def get_PWN_from_FW(x2,y1,y2,y3):
x3=(y3-y1)*x2/(y2-y1)
return x3
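# Linear interpolation through (0, y1) and (x2, y2): returns the x3 at which
# the line reaches y3.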
x3_base_PWS=get_PWN_from_FW(Ubase['PWN'],(Ubase['FW']+Ubase['SI'])*a_pwmat[-1,10,10]*1e3,(Ubase['FW']+Ubase['SI'])*a_pwmat[0,10,10]*1e3,50)
x3_base_PWS
Ubase['PWN']
1-x3_base_PWS/Ubase['PWN']
x3_fresh_PWS=get_PWN_from_FW(U_pw['PWN'],(U_pw['FW']+U_pw['SI'])*a_pwmat[-1,5,5]*1e3,(U_pw['FW']+U_pw['SI'])*a_pwmat[0,5,5]*1e3,50)
x3_fresh_PWS
U_pw['PWN']
def get_AWS_from_PWN(Uvar,Svar,eps):
alpha_U=-(Uvar['PWS']*Svar['PWS']+(1-eps)*Uvar['PWN']*Svar['PWN'])/Svar['AWS']
beta_U=-(Uvar['DWS']*Svar['DWS']+eps*Uvar['PWN']*Svar['PWN'])/Svar['AWS']
return alpha_U,beta_U
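# Back out the AWS transport feeding each limb from salt conservation:
# alpha_U balances the estuarine (PWS) limb and beta_U the overturning (DWS)
# limb, for a given PWN split eps.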
get_AWS_from_PWN(Ubase,S,0.65)
get_AWS_from_PWN(U_pw,S_PW,0.65)
#######################################################################################
############## Graveyard: superseded plotting functions, kept for reference ##########
#######################################################################################
# def plot_in_each(axi):
# axi.plot(S['PWN'],S['PWS'],'ko',markersize=10)
# axi.plot(S['PWN']+PWN_Svec,S['PWN']+PWN_Svec,'r-',linewidth=3)
#
# def plot_PW_Sdep(Svec,Tvec,simats):
# f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)
# axivec=array([])
# for axirow in axx:
# for axi in axirow:
# axivec=hstack((axivec,axi))
# for axi,kk in zip(axivec,simats):
# if (kk=='FW') | (kk=='SI'):
# climi=20
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [mSv]')
# cbar=colorbar(contit,ax=axi,format='%1.0f')
# plot_in_each(axi)
# elif kk=='Q':
# climi=30
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,cp*rhow*(pwmats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [TW]')
# cbar=colorbar(contit,ax=axi,format='%2.0f')
# plot_in_each(axi)
# else:
# climi=1.5
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [Sv]')
# cbar=colorbar(contit,ax=axi,format='%0.2f')
# plot_in_each(axi)
# for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
# label.set_visible(False)
# axi.set_ylim(S['PWS']+PWS_Svec[0],S['PWS']+PWS_Svec[-1])
# f.text(0.5, 0, 'PWN salinity', ha='center',fontsize=14)
# f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)
#
# savefig(figdir_paper+'_extra_2004/PW_Sdep.png',bbox_inches='tight')
# savefig(figdir_paper+'_extra_2004/PW_Sdep.pdf',bbox_inches='tight')
#
#
# plot_PW_Sdep(PWN_Svec,PWS_Svec,pwmats)
# def plot_PW_Sdep_lines():
# f,axx=subplots(2,4,figsize=(15,6),sharex=True)
# axivec=array([])
# for axirow in axx:
# for axi in axirow:
# axivec=hstack((axivec,axi))
# for axi,kk in zip(axivec,simats):
# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[-2,:],(pwmats[kk].T[-2,:]),label='vary PWN salinity')
# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[:,-3],(pwmats[kk].T[:,-3]),label='vary PWS salinity')
# axi.plot(((S['PWN'])-(S['PWS'])),(Ubase[kk]),'ko',label='base case')
# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[5,5]),'ro',label='both 0.5 fresher')
# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[0,0]),'go',label='both 1 fresher')
# axi.set_title(kk)
# axi.legend(loc=(1,0.7))
# f.text(0.5, 0, 'PWN salinity - PWS salinity', ha='center',fontsize=14)
# # f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)
#
# # savefig(figdir_paper+'/PW_Sdep.png',bbox_inches='tight')
# # savefig(figdir_paper+'/PW_Sdep.pdf',bbox_inches='tight')
#
# plot_PW_Sdep_lines()
# Ubase.keys()
axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\n<mask token>\n",
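run_inverse_model in the step above implements a standard SVD (Gauss–Markov-style) update: scale the unknowns by their prior uncertainties E, weight the constraint rows by Winv, and apply the pseudoinverse to get the minimum-norm correction that zeroes the budget residuals. A self-contained sketch of that pattern, with one mass- and one salt-conservation row (all numbers below are made up for illustration, not taken from this record):

import numpy as np

A = np.array([[1.0, 1.0, 1.0],          # mass conservation row
              [35.0, 34.0, 33.0]])      # salt conservation row (salinities)
x0 = np.array([10.0, -6.0, -3.5])       # prior transports; A @ x0 != 0
dv = -A @ x0                            # data vector: drive the residual to zero
E = np.diag(np.abs(x0) / 5.0)           # prior uncertainty scaling, as in the script
Winv = np.diag([1.0, 1.0 / 35.0])       # row weights (salt row normalized by ~35)

Umat, D, VmatT = np.linalg.svd(Winv @ A @ E)
Lambda_inv = np.zeros((A.shape[1], A.shape[0]))
Lambda_inv[:A.shape[0], :A.shape[0]] = np.diag(1.0 / D)
dx = E @ (VmatT.T @ (Lambda_inv @ (Umat.T @ (Winv @ dv))))  # minimum-norm correction
x = x0 + dx
print(A @ x)  # ~[0, 0]: both budget constraints now close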
"step-3": "<mask token>\n\n\ndef get_U_S_T_from_WM(WM):\n U = {}\n S = {}\n T = {}\n for wm in WM.WM:\n U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n U['SI'] = 0.073\n U['FW'] = 0.028\n U['Q'] = Q\n S['SI'] = 0\n S['FW'] = 0\n T['SI'] = 0\n T['FW'] = 0\n T['Q'] = 1\n return U, S, T\n\n\n<mask token>\n\n\ndef get_U_from_x(x):\n U = {}\n U['PWS'] = x[0]\n U['AWS'] = x[1]\n U['DWS'] = x[2]\n U['PWN'] = x[3]\n U['AWN'] = x[4]\n U['FW'] = x[5]\n U['SI'] = x[6]\n U['Q'] = x[7]\n return U\n\n\n<mask token>\nAM[zz].dot(x0[zz])\n16 / 35\n1.5 / 10\n<mask token>\n\n\ndef run_inverse_model(zz, U, S, T):\n dv = -AM[zz].dot(x0[zz])\n if zz == 'base':\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n elif zz == 'massbal':\n Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])\n Evec = array([(xx / 5) for xx in x0[zz]])\n E = diag(Evec)\n Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n xbase = x0[zz] + xsol_Ad\n P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +\n linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase = get_U_from_x(xbase)\n Ue = get_U_from_x(P)\n return Ubase, Ue, xbase\n\n\n<mask token>\n\n\ndef plot_base_case_simple(Ubase, Ue, plt):\n f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,\n gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))\n alf = 0.75\n capi = 7\n axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[\n coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',\n 'DWS']], capsize=capi, alpha=alf)\n axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')\n ylimi = 20\n axx[0].set_ylim(-ylimi, ylimi)\n ylimi = 4\n axx[1].set_ylim(-ylimi, ylimi)\n axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color\n =[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in\n ['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)\n axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',\n color='k')\n axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue\n ['SI'] + Ue['FW'], capsize=capi, alpha=alf)\n axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')\n fwlim = 0.2\n axx[2].set_ylim(-fwlim, fwlim)\n fsz = 14\n axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)\n axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],\n yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)\n axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')\n for ii in range(3):\n axx[ii].axhline(0, color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS', 'DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',\n bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',\n bbox_inches='tight')\n\n\nplot_base_case_simple(Ubase, Ue, 'base')\nU\nUbase['SI'] + 
Ubase['FW']\nUbase['Q'] * cp * rhow / 1000000.0\n<mask token>\nbasediff\nplot_base_case_simple(Umb_sol, Umb_err, 'mb')\n[(kk, Umb_sol[kk] - U_mb[kk]) for kk in Ubase]\n<mask token>\n\n\ndef get_a_b_fracs(Ubase, S):\n a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[\n 'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *\n (S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n return a, b\n\n\nS['PWN'] / S['AWS']\nS['PWS'] / S['AWS']\nS['DWS'] / S['AWS']\nUbase['PWS']\nUbase['DWS']\nUbase['PWN'] * (S['PWN'] / S['AWS'] - 1)\nUbase['PWS'] * (S['PWS'] / S['AWS'] - 1)\nUbase['DWS'] * (S['DWS'] / S['AWS'] - 1)\nUbase['FW'] + Ubase['SI']\n<mask token>\n[(kk, S[kk] - S_mb[kk]) for kk in S]\n\n\ndef plot_adep():\n for ii, kk in enumerate(a):\n plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))\n xlabel('$\\\\mathbf{1-\\\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0, 1)\n axhline(0, color='k')\n legend()\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')\n\n\nplot_adep()\n<mask token>\nfor S_SI in range(0, 10, 2):\n sivar[S_SI] = {}\n for T_SI in range(-90, 5, 10):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],\n S['PWN'], S['AWN'], S['FW'], S_SI, 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T_SI, 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n sivar[S_SI][T_SI] = xbase + xsol_Ad\n\n\ndef get_mats_from_dic(sivar):\n Svec = array([float(ff) for ff in sivar])\n Tvec = array([float(ff) for ff in sivar[Svec[0]]])\n simats = {}\n for QQ, kk in enumerate(Ubase):\n simats[kk] = zeros((len(Svec), len(Tvec)))\n for ii, ss in enumerate(Svec):\n for jj, tt in enumerate(Tvec):\n simats[kk][ii, jj] = sivar[ss][tt][QQ]\n return Svec, Tvec, simats\n\n\n<mask token>\n\n\ndef plot_SIresponse():\n f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)\n axivec = array([])\n for axirow in axx:\n for axi in axirow:\n axivec = hstack((axivec, axi))\n for axi, kk in zip(axivec, simats):\n if (kk == 'FW') | (kk == 'SI'):\n climi = 10\n contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) * \n 1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)\n axi.set_title(kk + ' [mSv]')\n cbar = colorbar(contit, ax=axi, format='%1.0f')\n elif kk == 'Q':\n climi = 30\n contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -\n Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.\n PiYG_r)\n axi.set_title(kk + ' [TW]')\n cbar = colorbar(contit, ax=axi, format='%2.0f')\n else:\n climi = 0.3\n contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],\n vmin=-climi, vmax=climi, cmap=cm.PuOr_r)\n axi.set_title(kk + ' [Sv]')\n cbar = colorbar(contit, ax=axi, format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\\\circ$C]', va=\n 'center', rotation='vertical', fontsize=14)\n 
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=\n 'tight')\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=\n 'tight')\n\n\nplot_SIresponse()\ncontourf(simats['AWN'].T - Ubase['AWN'] + simats['PWN'].T - Ubase['PWN'])\ncolorbar()\n<mask token>\nfor S_PWNa in arange(-1, 0.05, 0.1):\n pwsvar[S_PWNa] = {}\n for S_PWSa in arange(-1.0, 0.05, 0.1):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] + S_PWSa, S['AWS'],\n S['DWS'], S['PWN'] + S_PWNa, S['AWN'], S['FW'], S['SI'], 0], [T\n ['PWS'], T['AWS'], T['DWS'], T['PWN'], T['AWN'], T['FW'], T[\n 'SI'], 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n pwsvar[S_PWNa][S_PWSa] = xbase + xsol_Ad\n<mask token>\n(U_pw['FW'] + U_pw['SI'] - (Ubase['FW'] + Ubase['SI'])) * 1000.0\nU_pw['FW'] + U_pw['SI']\nUbase['FW'] + Ubase['SI']\nU_si\n[(kk, U_si[kk] - Ubase[kk]) for kk in Ubase]\n[(U_si[kk] - Ubase[kk]) for kk in Ubase][-1] * cp * rhow / 1000000.0\nU_pw['Q'] * cp * rhow / 1000000.0\n\n\ndef lineplot_PW_salinity():\n f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)\n xind = -1\n yind = -1\n svr = len(PWS_Svec)\n xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],\n [(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]\n ufw_tot = -Ubase['SI'] - Ubase['FW']\n yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + ufw_tot,\n pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(\n [(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in\n range(svr)])]\n yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -\n Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in\n range(svr)])]\n xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']\n titvec = ['a) Vary PWN salinity\\n\\nPWS = 34.4',\n 'b) Vary PWS salinity\\n\\nPWN = 33.7', 'c) Vary both PW salinities']\n lw = 2\n for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:\n axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for\n ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)\n for ii in range(3):\n ax1 = axx[ii].twinx()\n for ll in ['']:\n ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)\n ax2 = axx[ii].twinx()\n ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=\n 'limegreen', linewidth=lw)\n axx[ii].set_xlabel(xlab[ii])\n ax1.set_ylim(-10, 10)\n ax2.set_ylim(-40, 40)\n axx[ii].set_title(titvec[ii], fontweight='bold')\n if ii != 2:\n ax1.set_yticklabels('')\n ax2.set_yticklabels('')\n axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])\n axx[0].set_ylim(-1.5, 1.5)\n axx[0].set_yticks(arange(-1, 1.1, 0.5))\n ax2.spines['right'].set_position(('axes', 1.3))\n axx[0].set_ylabel('Transport anomaly [Sv]')\n ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')\n ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')\n ax1.tick_params(axis='y', colors='c')\n ax2.tick_params(axis='y', colors='limegreen')\n leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)\n for line in 
leg.get_lines():\n line.set_linewidth(4.0)\n axi2 = axx[2].twiny()\n axi2.set_xticks(arange(32.8, 33.8, 0.2))\n axi2.set_xlim(xvar[0][0], xvar[0][-1])\n axi2.set_xlabel('PWN salinity')\n axx[2].axvline(34.4 - 0.5, color='k', zorder=0)\n savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')\n savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')\n\n\nlineplot_PW_salinity()\n37 / (56 + 37 + 5)\nUbase['FW']\nUbase['SI']\n<mask token>\nfor U_FW in arange(0, 0.11, 0.01):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'], S[\n 'PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\n xinit = xbase.copy()\n xinit[5] = xinit[5] + U_FW\n dv = -AM.dot(xinit)\n Evec = xinit / 5\n Evec[5:7] = 1e-10\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n fwvar[U_FW] = xinit + xsol_Ad\n<mask token>\nU['FW'] + U['SI']\nUbase['FW'] + Ubase['SI'] + 0.05\nU_fwvar['FW'] + U_fwvar['SI']\nU_fwvar['Q'] * cp * rhow / 1000000.0\nU_fwvar\n<mask token>\nfor ii, ee in enumerate(1 - epsilon):\n a_pwmat[ii, :, :] = (ee * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat) / S[\n 'AWS'] - 1) + pwmats['PWS'].T * ((S['PWS'] + PWS_Smat) / S['AWS'] - 1)\n ) / (pwmats['FW'].T + pwmats['SI'].T)\n b_pwmat[ii, :, :] = ((1 - ee) * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat\n ) / S['AWS'] - 1) + pwmats['DWS'].T * (S['DWS'] / S['AWS'] - 1)) / (\n pwmats['FW'].T + pwmats['SI'].T)\n<mask token>\nPWN_Smat[10, 10]\nPWS_Smat[10, 10]\nPWN_Smat[5, 5]\nPWS_Smat[5, 5]\n<mask token>\n\n\ndef plot_adep_pw():\n f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii, var in enumerate([a_pwmat, b_pwmat]):\n if ii == 0:\n xvar = 1 - epsilon\n xvar2 = 1\n xvar3 = 0\n else:\n xvar = epsilon\n xvar2 = 0\n xvar3 = 1\n axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, linewidth=3, color='k', label=\n 'Base case', zorder=5)\n axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[\n 'SI']) * 1000.0, color='purple', zorder=4, label=\n 'Polar Waters fresher by 0.5', linewidth=3)\n axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)\n axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw\n ['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')\n axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)\n axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +\n U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')\n axx[ii].set_ylim(-30, 140)\n axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +\n U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=\n 'Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[\n 'SI']) * 1000.0, linewidth=3, color=fwcol)\n axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, 'o', color=fwcol, label='')\n axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',\n color=fwcol, label='')\n axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,\n color=fwcol, label='')\n 
axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, ash, color=fwcol, label='')\n axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)\n axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)\n axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)\n axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\nplot_adep_pw()\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\n<mask token>\nx3_base_PWS\nUbase['PWN']\n1 - x3_base_PWS / Ubase['PWN']\n<mask token>\nx3_fresh_PWS\nU_pw['PWN']\n\n\ndef get_AWS_from_PWN(Uvar, Svar, eps):\n alpha_U = -(Uvar['PWS'] * Svar['PWS'] + (1 - eps) * Uvar['PWN'] * Svar[\n 'PWN']) / Svar['AWS']\n beta_U = -(Uvar['DWS'] * Svar['DWS'] + eps * Uvar['PWN'] * Svar['PWN']\n ) / Svar['AWS']\n return alpha_U, beta_U\n\n\nget_AWS_from_PWN(Ubase, S, 0.65)\nget_AWS_from_PWN(U_pw, S_PW, 0.65)\n",
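get_mats_from_dic, used throughout the parameter sweeps above, simply unpacks a nested dict of solution vectors {outer_param: {inner_param: x}} into one 2-D matrix per unknown. A tiny runnable sketch of that reshaping (the three unknown names and the s + t / s - t / s * t entries are invented placeholders, not model output):

import numpy as np

unknowns = ['PWS', 'AWS', 'Q']
sivar = {s: {t: np.array([s + t, s - t, s * t])
             for t in (-30.0, -10.0)}
         for s in (0.0, 4.0)}

Svec = np.array([float(ss) for ss in sivar])
Tvec = np.array([float(tt) for tt in sivar[Svec[0]]])
simats = {kk: np.zeros((len(Svec), len(Tvec))) for kk in unknowns}
for ii, ss in enumerate(Svec):
    for jj, tt in enumerate(Tvec):
        for qq, kk in enumerate(unknowns):
            simats[kk][ii, jj] = sivar[ss][tt][qq]

print(simats['Q'])  # simats['Q'][i, j] == Svec[i] * Tvec[j] by construction here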
"step-4": "<mask token>\nfigdir = (\n '/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'\n )\nfigdir_paper = (\n '/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'\n )\nWM = xr.open_dataset(datadir + 'FW_WM/OSNAP2014-18_WM_2008.nc')\nWM_mb = xr.open_dataset(datadir + 'FW_WM/OSNAP2014-18_WM_mb_2008.nc')\ncp = 3850\nrhow = 1025\ntera = 10 ** 12\nQ = -251 * tera / rhow / cp / 1000000.0\n\n\ndef get_U_S_T_from_WM(WM):\n U = {}\n S = {}\n T = {}\n for wm in WM.WM:\n U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n U['SI'] = 0.073\n U['FW'] = 0.028\n U['Q'] = Q\n S['SI'] = 0\n S['FW'] = 0\n T['SI'] = 0\n T['FW'] = 0\n T['Q'] = 1\n return U, S, T\n\n\nU, S, T = get_U_S_T_from_WM(WM)\nU_mb, S_mb, T_mb = get_U_S_T_from_WM(WM_mb)\n\n\ndef get_U_from_x(x):\n U = {}\n U['PWS'] = x[0]\n U['AWS'] = x[1]\n U['DWS'] = x[2]\n U['PWN'] = x[3]\n U['AWN'] = x[4]\n U['FW'] = x[5]\n U['SI'] = x[6]\n U['Q'] = x[7]\n return U\n\n\nAM = {}\nx0 = {}\nAM['base'] = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],\n S['PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T['DWS'],\n T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\nx0['base'] = [U['PWS'], U['AWS'], U['DWS'], U['PWN'], U['AWN'], U['FW'], U[\n 'SI'], U['Q']]\nAM['massbal'] = array([[1, 1, 1, 0, 0, 0.5, 0.5, 0], [0, 0, 0, 1, 1, 0.5, \n 0.5, 0], [S_mb['PWS'], S_mb['AWS'], S_mb['DWS'], S_mb['PWN'], S_mb[\n 'AWN'], S_mb['FW'], S_mb['SI'], 0], [T_mb['PWS'], T_mb['AWS'], T_mb[\n 'DWS'], T_mb['PWN'], T_mb['AWN'], T_mb['FW'], T_mb['SI'], 1]])\nx0['massbal'] = [U_mb['PWS'], U_mb['AWS'], U_mb['DWS'], U_mb['PWN'], U_mb[\n 'AWN'], U_mb['FW'], U_mb['SI'], U_mb['Q']]\nzz = 'base'\nAM[zz].dot(x0[zz])\n16 / 35\n1.5 / 10\nSnorm = 35\nTnorm = 5\n\n\ndef run_inverse_model(zz, U, S, T):\n dv = -AM[zz].dot(x0[zz])\n if zz == 'base':\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n elif zz == 'massbal':\n Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])\n Evec = array([(xx / 5) for xx in x0[zz]])\n E = diag(Evec)\n Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n xbase = x0[zz] + xsol_Ad\n P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +\n linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase = get_U_from_x(xbase)\n Ue = get_U_from_x(P)\n return Ubase, Ue, xbase\n\n\nUbase, Ue, xbase = run_inverse_model('base', U, S, T)\nUmb_sol, Umb_err, xmb = run_inverse_model('massbal', U_mb, S_mb, T_mb)\ncoldic = {'AWS': 'red', 'DWS': 'grey', 'PWS': 'royalblue', 'PWN': 'purple',\n 'AWN': 'orange', 'SI': 'cyan', 'FW': 'cyan', 'Q': 'limegreen'}\n\n\ndef plot_base_case_simple(Ubase, Ue, plt):\n f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,\n gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))\n alf = 0.75\n capi = 7\n axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[\n coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',\n 'DWS']], capsize=capi, alpha=alf)\n axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')\n ylimi = 20\n 
axx[0].set_ylim(-ylimi, ylimi)\n ylimi = 4\n axx[1].set_ylim(-ylimi, ylimi)\n axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color\n =[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in\n ['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)\n axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',\n color='k')\n axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue\n ['SI'] + Ue['FW'], capsize=capi, alpha=alf)\n axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')\n fwlim = 0.2\n axx[2].set_ylim(-fwlim, fwlim)\n fsz = 14\n axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)\n axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],\n yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)\n axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')\n for ii in range(3):\n axx[ii].axhline(0, color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS', 'DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',\n bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',\n bbox_inches='tight')\n\n\nplot_base_case_simple(Ubase, Ue, 'base')\nU\nUbase['SI'] + Ubase['FW']\nUbase['Q'] * cp * rhow / 1000000.0\nbasediff = [(kk, Ubase[kk] - U[kk]) for kk in Ubase]\nbasediff\nplot_base_case_simple(Umb_sol, Umb_err, 'mb')\n[(kk, Umb_sol[kk] - U_mb[kk]) for kk in Ubase]\nepsilon = arange(0, 1.1, 0.1)\n\n\ndef get_a_b_fracs(Ubase, S):\n a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[\n 'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *\n (S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n return a, b\n\n\nS['PWN'] / S['AWS']\nS['PWS'] / S['AWS']\nS['DWS'] / S['AWS']\nUbase['PWS']\nUbase['DWS']\nUbase['PWN'] * (S['PWN'] / S['AWS'] - 1)\nUbase['PWS'] * (S['PWS'] / S['AWS'] - 1)\nUbase['DWS'] * (S['DWS'] / S['AWS'] - 1)\nUbase['FW'] + Ubase['SI']\na = {}\nb = {}\na['base'], b['base'] = get_a_b_fracs(Ubase, S)\na['mb'], b['mb'] = get_a_b_fracs(Umb_sol, S_mb)\n[(kk, S[kk] - S_mb[kk]) for kk in S]\n\n\ndef plot_adep():\n for ii, kk in enumerate(a):\n plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))\n xlabel('$\\\\mathbf{1-\\\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0, 1)\n axhline(0, color='k')\n legend()\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')\n\n\nplot_adep()\nsivar = {}\nfor S_SI in range(0, 10, 2):\n sivar[S_SI] = {}\n for T_SI in range(-90, 5, 10):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],\n S['PWN'], S['AWN'], S['FW'], S_SI, 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T_SI, 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = 
E.dot(xsol_prime)\n sivar[S_SI][T_SI] = xbase + xsol_Ad\n\n\ndef get_mats_from_dic(sivar):\n Svec = array([float(ff) for ff in sivar])\n Tvec = array([float(ff) for ff in sivar[Svec[0]]])\n simats = {}\n for QQ, kk in enumerate(Ubase):\n simats[kk] = zeros((len(Svec), len(Tvec)))\n for ii, ss in enumerate(Svec):\n for jj, tt in enumerate(Tvec):\n simats[kk][ii, jj] = sivar[ss][tt][QQ]\n return Svec, Tvec, simats\n\n\nSvec, Tvec, simats = get_mats_from_dic(sivar)\n\n\ndef plot_SIresponse():\n f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)\n axivec = array([])\n for axirow in axx:\n for axi in axirow:\n axivec = hstack((axivec, axi))\n for axi, kk in zip(axivec, simats):\n if (kk == 'FW') | (kk == 'SI'):\n climi = 10\n contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) * \n 1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)\n axi.set_title(kk + ' [mSv]')\n cbar = colorbar(contit, ax=axi, format='%1.0f')\n elif kk == 'Q':\n climi = 30\n contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -\n Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.\n PiYG_r)\n axi.set_title(kk + ' [TW]')\n cbar = colorbar(contit, ax=axi, format='%2.0f')\n else:\n climi = 0.3\n contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],\n vmin=-climi, vmax=climi, cmap=cm.PuOr_r)\n axi.set_title(kk + ' [Sv]')\n cbar = colorbar(contit, ax=axi, format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\\\circ$C]', va=\n 'center', rotation='vertical', fontsize=14)\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=\n 'tight')\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=\n 'tight')\n\n\nplot_SIresponse()\ncontourf(simats['AWN'].T - Ubase['AWN'] + simats['PWN'].T - Ubase['PWN'])\ncolorbar()\npwsvar = {}\nfor S_PWNa in arange(-1, 0.05, 0.1):\n pwsvar[S_PWNa] = {}\n for S_PWSa in arange(-1.0, 0.05, 0.1):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] + S_PWSa, S['AWS'],\n S['DWS'], S['PWN'] + S_PWNa, S['AWN'], S['FW'], S['SI'], 0], [T\n ['PWS'], T['AWS'], T['DWS'], T['PWN'], T['AWN'], T['FW'], T[\n 'SI'], 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n pwsvar[S_PWNa][S_PWSa] = xbase + xsol_Ad\nPWN_Svec, PWS_Svec, pwmats = get_mats_from_dic(pwsvar)\nPWN_Smat, PWS_Smat = meshgrid(PWN_Svec, PWS_Svec)\nU_si = get_U_from_x(sivar[0][-30])\nU_pw = get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001])\n(U_pw['FW'] + U_pw['SI'] - (Ubase['FW'] + Ubase['SI'])) * 1000.0\nU_pw['FW'] + U_pw['SI']\nUbase['FW'] + Ubase['SI']\nU_si\n[(kk, U_si[kk] - Ubase[kk]) for kk in Ubase]\n[(U_si[kk] - Ubase[kk]) for kk in Ubase][-1] * cp * rhow / 1000000.0\nU_pw['Q'] * cp * rhow / 1000000.0\n\n\ndef lineplot_PW_salinity():\n f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)\n xind = -1\n yind = -1\n svr = len(PWS_Svec)\n xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],\n [(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]\n ufw_tot = -Ubase['SI'] - Ubase['FW']\n yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + 
ufw_tot,\n pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(\n [(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in\n range(svr)])]\n yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -\n Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in\n range(svr)])]\n xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']\n titvec = ['a) Vary PWN salinity\\n\\nPWS = 34.4',\n 'b) Vary PWS salinity\\n\\nPWN = 33.7', 'c) Vary both PW salinities']\n lw = 2\n for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:\n axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for\n ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)\n for ii in range(3):\n ax1 = axx[ii].twinx()\n for ll in ['']:\n ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)\n ax2 = axx[ii].twinx()\n ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=\n 'limegreen', linewidth=lw)\n axx[ii].set_xlabel(xlab[ii])\n ax1.set_ylim(-10, 10)\n ax2.set_ylim(-40, 40)\n axx[ii].set_title(titvec[ii], fontweight='bold')\n if ii != 2:\n ax1.set_yticklabels('')\n ax2.set_yticklabels('')\n axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])\n axx[0].set_ylim(-1.5, 1.5)\n axx[0].set_yticks(arange(-1, 1.1, 0.5))\n ax2.spines['right'].set_position(('axes', 1.3))\n axx[0].set_ylabel('Transport anomaly [Sv]')\n ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')\n ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')\n ax1.tick_params(axis='y', colors='c')\n ax2.tick_params(axis='y', colors='limegreen')\n leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n axi2 = axx[2].twiny()\n axi2.set_xticks(arange(32.8, 33.8, 0.2))\n axi2.set_xlim(xvar[0][0], xvar[0][-1])\n axi2.set_xlabel('PWN salinity')\n axx[2].axvline(34.4 - 0.5, color='k', zorder=0)\n savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')\n savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')\n\n\nlineplot_PW_salinity()\n37 / (56 + 37 + 5)\nUbase['FW']\nUbase['SI']\nfwvar = {}\nfor U_FW in arange(0, 0.11, 0.01):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'], S[\n 'PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\n xinit = xbase.copy()\n xinit[5] = xinit[5] + U_FW\n dv = -AM.dot(xinit)\n Evec = xinit / 5\n Evec[5:7] = 1e-10\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n fwvar[U_FW] = xinit + xsol_Ad\nU_fwvar = get_U_from_x(fwvar[0.02])\na_fw, b_fw = get_a_b_fracs(U_fwvar, S)\nU['FW'] + U['SI']\nUbase['FW'] + Ubase['SI'] + 0.05\nU_fwvar['FW'] + U_fwvar['SI']\nU_fwvar['Q'] * cp * rhow / 1000000.0\nU_fwvar\nAM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] - 0.5, S['AWS'], S['DWS'], \n S['PWN'] - 0.5, S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\nxinit = xbase.copy()\nxinit[5] = xinit[5] + 0.02\ndv = -AM.dot(xinit)\nEvec = xinit / 5\nEvec[5:7] = 1e-10\nE = diag(Evec)\nWinv = diag([1, 1 / Snorm, 1 / 
Tnorm])\nUmat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\nLambda_inv = zeros((AM.shape[0], AM.shape[1])).T\nLambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\nxsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\nxsol_Ad = E.dot(xsol_prime)\nx_both = xinit + xsol_Ad\nU_both = get_U_from_x(x_both)\nS_PW = S.copy()\nS_PW['PWS'] = S['PWS'] - 0.5\nS_PW['PWN'] = S['PWN'] - 0.5\na_both, b_both = get_a_b_fracs(U_both, S_PW)\na_pwmat = zeros((len(epsilon), shape(pwmats['Q'])[1], shape(pwmats['Q'])[0]))\nb_pwmat = a_pwmat.copy()\nfor ii, ee in enumerate(1 - epsilon):\n a_pwmat[ii, :, :] = (ee * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat) / S[\n 'AWS'] - 1) + pwmats['PWS'].T * ((S['PWS'] + PWS_Smat) / S['AWS'] - 1)\n ) / (pwmats['FW'].T + pwmats['SI'].T)\n b_pwmat[ii, :, :] = ((1 - ee) * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat\n ) / S['AWS'] - 1) + pwmats['DWS'].T * (S['DWS'] / S['AWS'] - 1)) / (\n pwmats['FW'].T + pwmats['SI'].T)\nc_pwmat = 1 - a_pwmat - b_pwmat\nPWN_Smat[10, 10]\nPWS_Smat[10, 10]\nPWN_Smat[5, 5]\nPWS_Smat[5, 5]\nepsilon = arange(0, 1.1, 0.1)\nfwcol = '#43a2ca'\nash = 'd'\n\n\ndef plot_adep_pw():\n f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii, var in enumerate([a_pwmat, b_pwmat]):\n if ii == 0:\n xvar = 1 - epsilon\n xvar2 = 1\n xvar3 = 0\n else:\n xvar = epsilon\n xvar2 = 0\n xvar3 = 1\n axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, linewidth=3, color='k', label=\n 'Base case', zorder=5)\n axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[\n 'SI']) * 1000.0, color='purple', zorder=4, label=\n 'Polar Waters fresher by 0.5', linewidth=3)\n axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)\n axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw\n ['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')\n axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)\n axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +\n U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')\n axx[ii].set_ylim(-30, 140)\n axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +\n U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=\n 'Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[\n 'SI']) * 1000.0, linewidth=3, color=fwcol)\n axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, 'o', color=fwcol, label='')\n axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',\n color=fwcol, label='')\n axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,\n color=fwcol, label='')\n axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, ash, color=fwcol, label='')\n axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)\n axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)\n axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)\n axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW 
transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\nplot_adep_pw()\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\nx3_base_PWS = get_PWN_from_FW(Ubase['PWN'], (Ubase['FW'] + Ubase['SI']) *\n a_pwmat[-1, 10, 10] * 1000.0, (Ubase['FW'] + Ubase['SI']) * a_pwmat[0, \n 10, 10] * 1000.0, 50)\nx3_base_PWS\nUbase['PWN']\n1 - x3_base_PWS / Ubase['PWN']\nx3_fresh_PWS = get_PWN_from_FW(U_pw['PWN'], (U_pw['FW'] + U_pw['SI']) *\n a_pwmat[-1, 5, 5] * 1000.0, (U_pw['FW'] + U_pw['SI']) * a_pwmat[0, 5, 5\n ] * 1000.0, 50)\nx3_fresh_PWS\nU_pw['PWN']\n\n\ndef get_AWS_from_PWN(Uvar, Svar, eps):\n alpha_U = -(Uvar['PWS'] * Svar['PWS'] + (1 - eps) * Uvar['PWN'] * Svar[\n 'PWN']) / Svar['AWS']\n beta_U = -(Uvar['DWS'] * Svar['DWS'] + eps * Uvar['PWN'] * Svar['PWN']\n ) / Svar['AWS']\n return alpha_U, beta_U\n\n\nget_AWS_from_PWN(Ubase, S, 0.65)\nget_AWS_from_PWN(U_pw, S_PW, 0.65)\n",
"step-5": "from firstfuncs_1618 import *\n\nfigdir='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'\nfigdir_paper='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'\n\n########################################################################################################\n########################################################################################################\n#### Set up the optimization framework, which allows for varying almost all elements within a prescribed range\n########################################################################################################\n########################################################################################################\nWM=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_2008.nc')\nWM_mb=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_mb_2008.nc')\n\ncp=3850\nrhow=1025\ntera=10**12\n#Noresm (taking sea ice into account)\nQ=-251*tera/rhow/cp/1e6 #for the Sverdrups\n\ndef get_U_S_T_from_WM(WM):\n U={}\n S={}\n T={}\n for wm in WM.WM:\n U[str(wm.values)]=float(WM['TRANS'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)]=float(WM['PSAL'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)]=float(WM['PTMP'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)\n\n U['SI']=0.073 # NorESM fresh water input v. similar to Kwok et al. 2004 70mSv\n U['FW']=0.028 # mean E-P from JRA55\n U['Q']=Q\n S['SI']=0\n S['FW']=0\n T['SI']=0\n T['FW']=0\n T['Q']=1\n\n return U,S,T\n\nU,S,T=get_U_S_T_from_WM(WM)\nU_mb,S_mb,T_mb=get_U_S_T_from_WM(WM_mb)\n\ndef get_U_from_x(x):\n U={}\n U['PWS']=x[0]\n U['AWS']=x[1]\n U['DWS']=x[2]\n U['PWN']=x[3]\n U['AWN']=x[4]\n U['FW']=x[5]\n U['SI']=x[6]\n U['Q']=x[7]\n return U\n\nAM={}\nx0={}\n\nAM['base']=array([[1,1,1,1,1,1,1,0],\\\n[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\\\n[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])\n\nx0['base']=[U['PWS'],U['AWS'],U['DWS'],U['PWN'],U['AWN'],U['FW'],U['SI'],U['Q']]\n\nAM['massbal']=array([[1,1,1,0,0,0.5,0.5,0],\\\n[0,0,0,1,1,0.5,0.5,0],\\\n[S_mb['PWS'],S_mb['AWS'],S_mb['DWS'],S_mb['PWN'],S_mb['AWN'],S_mb['FW'],S_mb['SI'],0],\\\n[T_mb['PWS'],T_mb['AWS'],T_mb['DWS'],T_mb['PWN'],T_mb['AWN'],T_mb['FW'],T_mb['SI'],1]])\n\nx0['massbal']=[U_mb['PWS'],U_mb['AWS'],U_mb['DWS'],U_mb['PWN'],U_mb['AWN'],U_mb['FW'],U_mb['SI'],U_mb['Q']]\n\nzz='base'\nAM[zz].dot(x0[zz])\n16/35\n1.5/10\n\n#vars that I want to be handy for later calcs\nSnorm=35\nTnorm=5\ndef run_inverse_model(zz,U,S,T):\n dv=-AM[zz].dot(x0[zz])\n\n if zz=='base':\n Winv=diag([1,1/Snorm,1/Tnorm])\n elif zz=='massbal':\n Winv=diag([1,1,1/Snorm,1/Tnorm])\n\n\n Evec=array([xx/5 for xx in x0[zz]])\n # Evec=hstack((5*[1],0.02,0.02,Qvar))\n E=diag(Evec)\n\n Umat,D,VmatT=linalg.svd(Winv.dot(AM[zz].dot(E)))\n\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1/D)\n xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad=E.dot(xsol_prime)\n xbase=x0[zz]+xsol_Ad\n P=diag(E-E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T))+linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase=get_U_from_x(xbase)\n Ue=get_U_from_x(P)\n return 
Ubase,Ue,xbase\n\nUbase,Ue,xbase=run_inverse_model('base',U,S,T)\n\nUmb_sol,Umb_err,xmb=run_inverse_model('massbal',U_mb,S_mb,T_mb)\n\ncoldic={'AWS':'red','DWS':'grey','PWS':'royalblue','PWN':'purple','AWN':'orange','SI':'cyan','FW':'cyan','Q':'limegreen'}\n\ndef plot_base_case_simple(Ubase,Ue,plt):\n f,axx=subplots(1,4,figsize=(9,2.5),constrained_layout=True,gridspec_kw=dict(width_ratios=[2,3,1,1]))\n\n alf=0.75\n capi=7\n #U\n axx[0].bar(range(2),[Ubase[kk] for kk in ['AWS','DWS']],color=[coldic[kk] for kk in ['AWS','DWS']],yerr=[Ue[kk] for kk in ['AWS','DWS']],capsize=capi,alpha=alf)\n axx[0].plot(range(2),[U[kk] for kk in ['AWS','DWS']],'o',color='k')\n\n ylimi=20\n axx[0].set_ylim(-ylimi,ylimi)\n ylimi=4\n axx[1].set_ylim(-ylimi,ylimi)\n axx[1].bar(range(3),[Ubase[kk] for kk in ['PWS','PWN','AWN']],color=[coldic[kk] for kk in ['PWS','PWN','AWN']],yerr=[Ue[kk] for kk in ['PWS','PWN','AWN']],capsize=capi,alpha=alf)\n axx[1].plot(range(3),[U[kk] for kk in ['PWS','PWN','AWN']],'o',color='k')\n\n axx[2].bar(range(1),U['SI']+Ubase['FW'],color=coldic['FW'],yerr=Ue['SI']+Ue['FW'],capsize=capi,alpha=alf)\n axx[2].plot(range(1),U['SI']+U['FW'],'o',color='k')\n fwlim=0.2\n axx[2].set_ylim(-fwlim,fwlim)\n\n fsz=14\n axx[0].set_ylabel('Volume transport [Sv]',fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]',fontsize=fsz)\n axx[3].bar(0,cp*rhow*(Ubase['Q'])/1e6,color=coldic['Q'],yerr=cp*rhow*Ue['Q']/1e6,capsize=capi,alpha=alf)\n axx[3].plot(0,cp*rhow*(U['Q'])/1e6,'o',color='k')\n\n for ii in range(3):\n axx[ii].axhline(0,color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS','DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS','PWN','AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n\n savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.png',bbox_inches='tight')\n savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.pdf',bbox_inches='tight')\n\nplot_base_case_simple(Ubase,Ue,'base')\n\nU\n\nUbase['SI']+Ubase['FW']\n\nUbase['Q']*cp*rhow/1e6\n\nbasediff=[(kk,Ubase[kk]-U[kk]) for kk in Ubase]\nbasediff\n\nplot_base_case_simple(Umb_sol,Umb_err,'mb')\n[(kk,Umb_sol[kk]-U_mb[kk]) for kk in Ubase]\n##################################################################################\n# Calculate fraction of fresh water vs. 
other water masses that goes into each limb\n#################################################################################\n#fraction of PWN in DWS limb\nepsilon=arange(0,1.1,0.1)\n\ndef get_a_b_fracs(Ubase,S):\n #fraction of FW in PWS, as a function of epsilon\n a=((1-epsilon)*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['PWS']*(S['PWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])\n #fraction of FW in DWS, as a function of epsilon\n b=(epsilon*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['DWS']*(S['DWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])\n return a,b\n\n\n\nS['PWN']/S['AWS']\nS['PWS']/S['AWS']\nS['DWS']/S['AWS']\n\nUbase['PWS']\nUbase['DWS']\nUbase['PWN']*(S['PWN']/S['AWS']-1)\nUbase['PWS']*(S['PWS']/S['AWS']-1)\nUbase['DWS']*(S['DWS']/S['AWS']-1)\n\n(Ubase['FW']+Ubase['SI'])\n\na={}\nb={}\na['base'],b['base']=get_a_b_fracs(Ubase,S)\na['mb'],b['mb']=get_a_b_fracs(Umb_sol,S_mb)\n[(kk,S[kk]-S_mb[kk]) for kk in S]\ndef plot_adep():\n for ii,kk in enumerate(a):\n plot(1-epsilon,a[kk],linewidth=3,label=kk,color='C'+str(ii))\n\n xlabel('$\\mathbf{1-\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0,1)\n axhline(0,color='k')\n legend()\n savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.png',bbox_inches='tight')\n savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.pdf',bbox_inches='tight')\n\n\nplot_adep()\n\n#################################################################################\n##### Look into how much Sea ice properties matter\n#################################################################################\nsivar={}\nfor S_SI in range(0,10,2):\n sivar[S_SI]={}\n for T_SI in range(-90,5,10):\n AM=array([[1,1,1,1,1,1,1,0],\\\n [S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S_SI,0],\\\n [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T_SI,1]])\n\n dv=-AM.dot(xbase)\n\n Evec=array(hstack(([1]*5,xbase[-3:]/5)))\n E=diag(Evec)\n Winv=diag([1,1/Snorm,1/Tnorm])\n Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))\n\n\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)\n xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad=E.dot(xsol_prime)\n sivar[S_SI][T_SI]=xbase+xsol_Ad\n\ndef get_mats_from_dic(sivar):\n Svec=array([float(ff) for ff in sivar])\n Tvec=array([float(ff) for ff in sivar[Svec[0]]])\n simats={}\n for QQ,kk in enumerate(Ubase):\n simats[kk]=zeros((len(Svec),len(Tvec)))\n for ii,ss in enumerate(Svec):\n for jj,tt in enumerate(Tvec):\n simats[kk][ii,jj]=sivar[ss][tt][QQ]\n return Svec,Tvec,simats\n\nSvec,Tvec,simats=get_mats_from_dic(sivar)\n\ndef plot_SIresponse():\n f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)\n axivec=array([])\n for axirow in axx:\n for axi in axirow:\n axivec=hstack((axivec,axi))\n for axi,kk in zip(axivec,simats):\n if (kk=='FW') | (kk=='SI'):\n climi=10\n contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)\n axi.set_title(kk+' [mSv]')\n cbar=colorbar(contit,ax=axi,format='%1.0f')\n elif kk=='Q':\n climi=30\n contit=axi.contourf(Svec,Tvec,cp*rhow*(simats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)\n axi.set_title(kk+' [TW]')\n cbar=colorbar(contit,ax=axi,format='%2.0f')\n else:\n climi=0.3\n contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)\n axi.set_title(kk+' [Sv]')\n cbar=colorbar(contit,ax=axi,format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n\n f.text(0.5, 0, 
import gc
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import cv2
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim import lr_scheduler
from dataset.car_dataset import CarDataset
from nn.network import MyUNet
from utils.utils import coords2str, extract_coords
from utils.evaluate_map import compute_map
from utils.utils import visualize
from efficientnet_pytorch import EfficientNet
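
# Camera intrinsics published with the PKU autonomous-driving dataset,
# used to project predicted 3D car poses back onto the image plane.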
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
[0, 2305.8757, 1354.9849],
[0, 0, 1]], dtype=np.float32)
device = torch.device("cuda")
IMG_WIDTH = 1024
IMG_HEIGHT = IMG_WIDTH // 16 * 5
MODEL_SCALE = 8
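
# Input resolution and heatmap downsampling factor shared with the dataset
# and coordinate-decoding utilities (kept here for reference; not used
# directly in this script).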
if __name__ == "__main__":
ROOT_PATH = "/media/andreis/storage/datasets/pku-autonomous-driving/"
df = pd.read_csv(ROOT_PATH + "train.csv")
df_test = pd.read_csv(ROOT_PATH + "sample_submission.csv")
train_images_dir = ROOT_PATH + "train_images/"
test_images_dir = ROOT_PATH + "test_images/"
df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)
df_val_gt = df_val.copy()
# create dataset objects
train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)
val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)
test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MyUNet(10).to(device)
model.load_state_dict(torch.load("model.pth"))
model.eval()
val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=4)
#img, mask, regr = val_dataset[0]
#output = model(torch.tensor(img[None]).to(device))
#output = output.data.cpu().numpy()
predictions = []
for img, _, _, img0 in tqdm(val_loader):
img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)
        img0 = torch.squeeze(img0).numpy()  # original-resolution frame kept for visualization
#print(img_np.shape)
with torch.no_grad():
#output = model(torch.tensor(img[None]).to(device))
output = model(img.to(device))
output = output.data.cpu().numpy()
# looping over batch items
for out in output:
coords = extract_coords(out)
print(coords)
# s = coords2str(coords)
#predictions.append(s)
q_img = visualize(img0, coords, camera_matrix)
print(q_img.shape)
            q_img = cv2.resize(q_img, (int(q_img.shape[1] * 0.25), int(q_img.shape[0] * 0.25)))
# show predictions on image
cv2.imshow("Prediction", q_img)
cv2.waitKey()
# cv2.imshow("Predictions", visualize(img_np, coords, camera_matrix))
# cv2.waitKey()
#df_val['PredictionString'] = predictions
#df_test.to_csv('predictions.csv', index=False)
#print(df_val.head())
#def sigmoid(x):
# return 1 / (1 + np.exp(-x))
#map = compute_map(df_val_gt, df_val)
#print(map)
#logits = output[0,0].data.cpu().numpy()
#sigmoids = np.apply_along_axis(sigmoid, -1, logits)
#print(output.shape)
#print(logits.shape)
#print(sigmoids.shape)
#print(sigmoids)
#print(np.max(sigmoids))
#points = np.argwhere(logits > 0)
#print(points)
#preds = extract_coords(output)
#img = np.rollaxis(img, 0, 3)
#print(type(img))
#cv2.imshow("imagine", img)
#cv2.imshow("mask", mask)
#cv2.imshow("regr", regr[:,:,-1])
#cv2.imshow("predictions", sigmoids)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
import numpy as np
import matplotlib.pyplot as plt
from math import *
from scipy.integrate import *
from pylab import *
from scipy.integrate import quad
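
# Compare pulsar spin-down under two braking models: magneto-dipolar (MHD)
# and current-loss (BGI, presumably Beskin-Gurevich-Istomin). For a grid of
# initial inclination angles chi, the period P and angle chi are integrated
# forward, and the chi-distribution of stars reaching period Pend is binned
# and plotted for both models.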
MHD = np.zeros((80, 90, 5), dtype=float)
BGI = np.zeros((80, 90, 5), dtype=float)
Fp = np.zeros(80, dtype=float)
AngMHD = np.zeros((90, 2), dtype=float)
AngBGI = np.zeros((90, 2), dtype=float)
B0 = [0.5, 1.5, 3, 5, 10]
V = [0.3, 0.3, 0.2, 0.1, 0.1]
def PMHD(p, chi, b):
    return b**2/p*(1 + (sin(chi))**2)
def xMHD(p, chi, b):
return -b**2/p**2*sin(chi)*cos(chi)
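
# Sign convention: the MHD torque above has dchi < 0 and drives the magnetic
# axis toward alignment (chi -> 0), whereas the BGI torque below is positive
# and drives it toward orthogonality (chi -> 90 deg).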
def PBGI(p, chi, b):
Q = 0.7*p/b**0.57/sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return b**2/p*(A*(cos(chi))**2 + 0.01/sqrt(p))
def xBGI(p, chi, b):
Q = 0.7*p/b**0.57/sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return A*b**2/p**2*sin(chi)*cos(chi)
P0 = 0.3
Pend = 1
B12 = 4
dx = 0.0001
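
# Forward-Euler integration: each of the 450 trajectories is stepped until
# the death-line parameter Q = 0.7*P/B12**0.57/sqrt(cos(chi)) reaches 2,
# and the final (P, chi) state is binned in 0.1-period x 1-degree cells.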
for i in range(450):
xi0 = i/5 + 0.1
x0 = pi/180*xi0
P = P0
x = x0
while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:
P = P + PMHD(P, x, B12)*dx
x = x + xMHD(P, x, B12)*dx
gx = 180/pi*x
iP = int(P/0.1)
ix = int(gx)
if iP < 80:
MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1
for i in range(450):
xi0 = i/5 + 0.1
x0 = pi/180*xi0
P = P0
x = x0
while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:
P = P + PBGI(P, x, B12)*dx
x = x + xBGI(P, x, B12)*dx
gx = 180/pi*x
iP = int(P/0.1)
ix = int(gx)
if iP < 80:
BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1
#for j in range(80):
# for i in range(90):
# Fp[j] = Fp[j] + PxiB[j, i, 0]
# print(j/10, Fp[j])
for i in range(90):
j = int(10*Pend)
AngMHD[i,0] = i
AngBGI[i,0] = i
AngMHD[i,1] = MHD[j, i, 0]
AngBGI[i,1] = BGI[j, i, 0]
# print(i, PxiB[10, i, 0])
ymax = np.max(AngBGI[:, 1])  # max bin count; max over the whole array would pick up the angle column
fig, ax = plt.subplots()
x = np.linspace(0, 90)
plt.xlim(1, 90)
plt.ylim(0, 1.2*ymax)
data1 = np.array(AngMHD)
data2 = np.array(AngBGI)
X1,Y1 = data1.T
X2,Y2 = data2.T
plt.scatter(X1, Y1, color='blue', s=15, label="MHD")
plt.scatter(X2, Y2, color='red', s=15, label="BGI")
plt.title('$P_0$ = '+str(P0)+', P = '+str(Pend)+', $B_{12}$ = '+str(B12))
plt.grid(True, which="both", ls="-")
plt.xlabel(r'$\chi$')  # raw string avoids the invalid '\c' escape
#plt.ylabel(r'$\lambda g(x_{0})$')
plt.legend()
plt.show()
#fig, ax = plt.subplots()
#x = np.linspace(0, 1)
#plt.xlim(0.0001, 1.0)
#plt.ylim(0, 0.1)
#plt.plot(x, x**2*(cos(ch)*(1 - x**2) + 1/2*sin(ch)*(x - x**3))**3, label="fitting")
#plt.title(''+str(PSR)+', $n_{\pm}$ (P = '+str(P)+', $B_{12}$ = '+str(B12)+', $\chi$ = '+str(chi)+'$^{\circ}$), $\lambda = 92$')
#plt.grid(True,which="both", ls="-")
#plt.grid(True,which="both", ls="-")
##ax.vlines(xcr, 0, 8, color = 'black', linewidth = 1.5, linestyle = '--')
#plt.xlabel('$r_{0}/R_0$')
#plt.ylabel('$n_{\pm}$')
#plt.legend()
#plt.show()
class Enumerator(object):
"""For Python we just wrap the iterator"""
def __init__(self, next):
self.iterator = next
def __next__(self):
return next(self.iterator)
# Python 2.7
next = __next__
def __iter__(self):
return self
|
normal
|
{
"blob_id": "1ca20b0cd9217623ff039ab352acd09df8dfae1b",
"index": 8235,
"step-1": "class Enumerator(object):\n <mask token>\n <mask token>\n\n def __next__(self):\n return next(self.iterator)\n <mask token>\n\n def __iter__(self):\n return self\n",
"step-2": "class Enumerator(object):\n <mask token>\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n <mask token>\n\n def __iter__(self):\n return self\n",
"step-3": "class Enumerator(object):\n <mask token>\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n next = __next__\n\n def __iter__(self):\n return self\n",
"step-4": "class Enumerator(object):\n \"\"\"For Python we just wrap the iterator\"\"\"\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n next = __next__\n\n def __iter__(self):\n return self\n",
"step-5": "class Enumerator(object):\n \"\"\"For Python we just wrap the iterator\"\"\"\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n\n # Python 2.7\n next = __next__\n\n def __iter__(self):\n return self\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.contrib.auth.hashers import make_password
from django.core import mail
from rest_framework import status
from django.contrib.auth.models import User
import time
from api.tests.api_test_case import CustomAPITestCase
from core.models import Member, Community, LocalCommunity, TransportCommunity, Profile, Notification
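
# Integration tests for community membership: joining (with and without
# auto-accept), leaving, listing memberships and members, and accepting or
# banning members, exercised across owner/moderator/member/outsider roles.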
class MemberTests(CustomAPITestCase):
def setUp(self):
"""
        Create the users, communities and memberships shared by all
        community-membership tests
"""
owner = self.user_model.objects.create(password=make_password('user1'), email='[email protected]',
first_name='1', last_name='User', is_active=True)
moderator = self.user_model.objects.create(password=make_password('user2'), email='[email protected]',
first_name='2', last_name='User', is_active=True)
member = self.user_model.objects.create(password=make_password('user3'), email='[email protected]',
first_name='3', last_name='User', is_active=True)
other = self.user_model.objects.create(password=make_password('user4'), email='[email protected]',
first_name='4', last_name='User', is_active=True)
Profile.objects.create(user=owner)
Profile.objects.create(user=moderator)
Profile.objects.create(user=member)
Profile.objects.create(user=other)
lcom1 = LocalCommunity.objects.create(name='lcom1', description='descl1', city='Paris', country='FR',
gps_x=0, gps_y=0)
lcom2 = LocalCommunity.objects.create(name='lcom2', description='descl2', city='Paris', country='FR',
gps_x=0, gps_y=0,
auto_accept_member=True)
lcom3 = LocalCommunity.objects.create(name='lcom3', description='descl3', city='Paris', country='FR',
gps_x=0, gps_y=0)
lcom4 = LocalCommunity.objects.create(name='lcom4', description='descl4', city='Paris', country='FR',
gps_x=0, gps_y=0,
auto_accept_member=True)
lcom5 = LocalCommunity.objects.create(name='lcom5', description='descl5', city='Paris', country='FR',
gps_x=0, gps_y=0)
tcom1 = TransportCommunity.objects.create(name='tcom1', description='desct1', departure='dep1', arrival='arr1',
auto_accept_member=True)
tcom2 = TransportCommunity.objects.create(name='tcom2', description='desct2', departure='dep2', arrival='arr2')
tcom3 = TransportCommunity.objects.create(name='tcom3', description='desct3', departure='dep3', arrival='arr3')
tcom4 = TransportCommunity.objects.create(name='tcom4', description='desct4', departure='dep4', arrival='arr4')
tcom5 = TransportCommunity.objects.create(name='tcom5', description='desct5', departure='dep4', arrival='arr5')
own_mbr = Member.objects.create(user=owner, community=lcom1, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom2, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom3, role='0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom3, role='1', status='0')
spl_mbr = Member.objects.create(user=member, community=lcom3, role='2', status='0')
own_mbr = Member.objects.create(user=owner, community=lcom4, role='0', status='1')
mod_mbr = Member.objects.create(user=moderator, community=lcom4, role='1', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom4, role='2', status='1')
own_mbr = Member.objects.create(user=owner, community=lcom5, role='0', status='1')
spl_mbr = Member.objects.create(user=member, community=lcom5, role='2', status='2')
own_mbr = Member.objects.create(user=owner, community=tcom1, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom2, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom3, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom4, role='0', status='1')
own_mbr = Member.objects.create(user=owner, community=tcom5, role='0', status='1')
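        # Fixture overview (status codes: '0' pending, '1' accepted, '2' banned):
        #   lcom1:  user1 owner
        #   lcom2:  user1 owner (auto-accepts members)
        #   lcom3:  user1 owner, user2 pending moderator, user3 pending member
        #   lcom4:  user1 owner, user2 moderator, user3 member (auto-accepts members)
        #   lcom5:  user1 owner, user3 banned member
        #   tcom1-tcom5: user1 owner (tcom1 auto-accepts members)
        #   user4 has no memberships at all.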
def test_setup(self):
self.assertEqual(4, self.user_model.objects.all().count())
self.assertEqual(10, Community.objects.all().count())
self.assertEqual(15, Member.objects.all().count())
def test_join_wrong_community(self):
"""
        Ensure an authenticated user cannot join a community that does not exist
"""
url = '/api/v1/communities/15/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_join_community_not_auto_accept(self):
"""
        Ensure an authenticated user can join a community that does not
        auto-accept members (the membership stays pending)
"""
url = '/api/v1/communities/1/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=1)
self.assertEqual(community, member.community)
self.assertEqual("2", member.role)
self.assertEqual("0", member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[SmarTribe] Nouveau membre')
self.assertTrue('demande à faire' in mail.outbox[0].body)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(16, Member.objects.all().count())
def test_join_community_auto_accept(self):
"""
        Ensure an authenticated user can join a community that auto-accepts
        members (the membership is accepted immediately)
"""
url = '/api/v1/communities/2/join_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
self.assertEqual(16, Member.objects.all().count())
member = Member.objects.get(user=self.user_model.objects.get(id=4))
community = Community.objects.get(id=2)
self.assertEqual(community, member.community)
self.assertEqual("2", member.role)
self.assertEqual("1", member.status)
self.assertEqual(1, Notification.objects.count())
time.sleep(1)
self.assertEqual(1, len(mail.outbox))
self.assertEqual(mail.outbox[0].subject,
'[SmarTribe] Nouveau membre')
self.assertTrue('fait désormais' in mail.outbox[0].body)
def test_leave_community(self):
"""
Ensure a member can leave a community
"""
url = '/api/v1/communities/3/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)
self.assertEqual(14, Member.objects.all().count())
def test_leave_community_banned(self):
"""
Ensure a banned member cannot leave a community
"""
url = '/api/v1/communities/5/leave_community/'
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)
self.assertEqual(15, Member.objects.all().count())
def test_list_my_memberships_without_auth(self):
"""
Ensure an unauthenticated user cannot list memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_my_memberships_member(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual(5, data['results'][2]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('2', data['results'][2]['status'])
self.assertEqual('2', data['results'][0]['role'])
self.assertEqual('2', data['results'][1]['role'])
self.assertEqual('2', data['results'][2]['role'])
def test_list_my_memberships_moderator(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(2, data['count'])
self.assertEqual(3, data['results'][0]['community']['id'])
self.assertEqual(4, data['results'][1]['community']['id'])
self.assertEqual('0', data['results'][0]['status'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual('1', data['results'][0]['role'])
self.assertEqual('1', data['results'][1]['role'])
def test_list_my_memberships_owner(self):
"""
Ensure a user can list all his memberships
"""
url = '/api/v1/communities/0/list_my_memberships/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(10, data['count'])
def test_list_members_without_auth(self):
"""
        Ensure a non-authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url)
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_member_rights(self):
"""
Ensure a non-member authenticated user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_without_mod_rights(self):
"""
Ensure a simple user cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights_not_accepted(self):
"""
        Ensure a moderator whose membership is still pending cannot list community members
"""
url = '/api/v1/communities/3/retrieve_members/'
        # The moderator's membership has not been accepted yet
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
def test_list_members_with_mod_rights(self):
"""
Ensure a moderator can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
self.assertEqual(6, data['results'][0]['id'])
self.assertEqual(1, data['results'][0]['user']['id'])
self.assertEqual('0', data['results'][0]['role'])
self.assertEqual('1', data['results'][0]['status'])
self.assertEqual(7, data['results'][1]['id'])
self.assertEqual(2, data['results'][1]['user']['id'])
self.assertEqual('1', data['results'][1]['role'])
self.assertEqual('1', data['results'][1]['status'])
self.assertEqual(8, data['results'][2]['id'])
self.assertEqual(3, data['results'][2]['user']['id'])
self.assertEqual('2', data['results'][2]['role'])
self.assertEqual('1', data['results'][2]['status'])
def test_list_members_with_owner_rights(self):
"""
Ensure an owner can list community members
"""
url = '/api/v1/communities/4/retrieve_members/'
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertEqual(3, data['count'])
def test_accept_member_without_auth(self):
"""
Ensure a non authenticated user can not accept members
"""
url = '/api/v1/communities/3/accept_member/'
data = {
'id': 5
}
response = self.client.post(url, data, format='json')
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_simple_member(self):
        """
        Ensure a simple member cannot accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_owner(self):
        """
        Ensure an owner can accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(5, data['id'])
        self.assertEqual('1', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership accepted')

    def test_accept_member_with_owner_bad_request(self):
        """
        Ensure accept_member rejects a request body without an 'id' field
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'lol': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_accept_member_with_owner_not_found(self):
        """
        Ensure accept_member fails when the target member does not exist
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 19
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_accept_member_with_not_accepted_moderator(self):
        """
        Ensure a non-accepted moderator cannot accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_moderator(self):
        """
        Ensure an accepted moderator can accept members
        """
        # Flip the moderator's own membership to accepted first
        mod = Member.objects.get(id=4)
        mod.status = '1'
        mod.save()
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(5, data['id'])
        self.assertEqual('1', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership accepted')
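
    # ban_member permission matrix exercised below (as encoded in the
    # expected status codes): owners and accepted moderators may ban plain
    # members, a moderator may not ban the owner, and everyone else gets a
    # 401. A successful ban sets the membership status to '2' (banned).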

    def test_ban_member_without_auth(self):
        """
        Ensure a non-authenticated user cannot ban a member
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_member_with_non_member(self):
        """
        Ensure a user without moderator rights cannot ban a member
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_moderator_with_member(self):
        """
        Ensure a simple member cannot ban a moderator
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 7
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_owner_with_member(self):
        """
        Ensure a simple member cannot ban the owner
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 6
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_member_with_moderator(self):
        """
        Ensure a moderator can ban a simple member
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('2', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership cancelled')

    def test_ban_member_with_owner(self):
        """
        Ensure an owner can ban a member
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('2', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership cancelled')

    def test_ban_owner_with_moderator(self):
        """
        Ensure a moderator cannot ban the owner
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 6
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
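
    # promote_moderator is expected to be owner-only; a successful promotion
    # sets the target member's role to '1' (moderator).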

    def test_promote_user_without_auth(self):
        """
        Ensure a non-authenticated user cannot promote a member to moderator
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_user(self):
        """
        Ensure an ordinary user cannot promote a member to moderator
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_moderator(self):
        """
        Ensure a moderator cannot promote a member to moderator
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_owner(self):
        """
        Ensure an owner can promote a member to moderator
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('1', data['role'])
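

# ---------------------------------------------------------------------------
# For context only: a minimal sketch of the kind of viewset action the
# accept_member tests above exercise. This is NOT the project's actual view
# code; the class name, the bare-dict response shape and the omission of the
# authentication/permission checks (the 401 cases) are assumptions made for
# illustration. `status` and `Member` are the names already imported at the
# top of this test module.
# ---------------------------------------------------------------------------
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response


class SketchCommunityViewSet(viewsets.GenericViewSet):

    @action(detail=True, methods=['post'])
    def accept_member(self, request, pk=None):
        # The tests expect a 400 when the payload lacks 'id' or names an
        # unknown member, and a 200 with the updated membership otherwise.
        member_id = request.data.get('id')
        if member_id is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        try:
            member = Member.objects.get(id=member_id, community_id=pk)
        except Member.DoesNotExist:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        member.status = '1'  # '1' = accepted, see the status codes above
        member.save()
        return Response({'id': member.id, 'status': member.status},
                        status=status.HTTP_200_OK)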
response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_moderator_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 7}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_owner_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 6}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_owner_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {'id': 6}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_user(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user2'), format='json')\n 
self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {'id': 8}\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth\n ('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-5": "from django.contrib.auth.hashers import make_password\nfrom django.core import mail\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nimport time\n\nfrom api.tests.api_test_case import CustomAPITestCase\nfrom core.models import Member, Community, LocalCommunity, TransportCommunity, Profile, Notification\n\n\nclass MemberTests(CustomAPITestCase):\n\n def setUp(self):\n \"\"\"\n Make a user for authenticating and\n testing community actions\n \"\"\"\n owner = self.user_model.objects.create(password=make_password('user1'), email='[email protected]',\n first_name='1', last_name='User', is_active=True)\n moderator = self.user_model.objects.create(password=make_password('user2'), email='[email protected]',\n first_name='2', last_name='User', is_active=True)\n member = self.user_model.objects.create(password=make_password('user3'), email='[email protected]',\n first_name='3', last_name='User', is_active=True)\n other = self.user_model.objects.create(password=make_password('user4'), email='[email protected]',\n first_name='4', last_name='User', is_active=True)\n\n Profile.objects.create(user=owner)\n Profile.objects.create(user=moderator)\n Profile.objects.create(user=member)\n Profile.objects.create(user=other)\n\n lcom1 = LocalCommunity.objects.create(name='lcom1', description='descl1', city='Paris', country='FR',\n gps_x=0, gps_y=0)\n lcom2 = LocalCommunity.objects.create(name='lcom2', description='descl2', city='Paris', country='FR',\n gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom3 = LocalCommunity.objects.create(name='lcom3', description='descl3', city='Paris', country='FR',\n gps_x=0, gps_y=0)\n lcom4 = LocalCommunity.objects.create(name='lcom4', description='descl4', city='Paris', country='FR',\n gps_x=0, gps_y=0,\n auto_accept_member=True)\n lcom5 = LocalCommunity.objects.create(name='lcom5', description='descl5', city='Paris', country='FR',\n gps_x=0, gps_y=0)\n tcom1 = TransportCommunity.objects.create(name='tcom1', description='desct1', departure='dep1', arrival='arr1',\n auto_accept_member=True)\n tcom2 = TransportCommunity.objects.create(name='tcom2', description='desct2', departure='dep2', arrival='arr2')\n tcom3 = TransportCommunity.objects.create(name='tcom3', description='desct3', departure='dep3', arrival='arr3')\n tcom4 = TransportCommunity.objects.create(name='tcom4', description='desct4', departure='dep4', arrival='arr4')\n tcom5 = TransportCommunity.objects.create(name='tcom5', description='desct5', departure='dep4', arrival='arr5')\n\n own_mbr = Member.objects.create(user=owner, community=lcom1, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=lcom2, role='0', status='1')\n\n own_mbr = Member.objects.create(user=owner, community=lcom3, role='0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom3, role='1', status='0')\n spl_mbr = Member.objects.create(user=member, community=lcom3, role='2', status='0')\n\n own_mbr = Member.objects.create(user=owner, community=lcom4, role='0', status='1')\n mod_mbr = Member.objects.create(user=moderator, community=lcom4, role='1', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom4, role='2', status='1')\n\n own_mbr = Member.objects.create(user=owner, community=lcom5, role='0', status='1')\n spl_mbr = Member.objects.create(user=member, community=lcom5, role='2', status='2')\n\n own_mbr = Member.objects.create(user=owner, community=tcom1, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, 
community=tcom2, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom3, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom4, role='0', status='1')\n own_mbr = Member.objects.create(user=owner, community=tcom5, role='0', status='1')\n\n def test_setup(self):\n self.assertEqual(4, self.user_model.objects.all().count())\n self.assertEqual(10, Community.objects.all().count())\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_wrong_community(self):\n \"\"\"\n Ensure an authenticated user cannot join a community that does not exists\n \"\"\"\n url = '/api/v1/communities/15/join_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n self.assertEqual(15, Member.objects.all().count())\n\n def test_join_community_not_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/1/join_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=1)\n self.assertEqual(community, member.community)\n self.assertEqual(\"2\", member.role)\n self.assertEqual(\"0\", member.status)\n\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[SmarTribe] Nouveau membre')\n self.assertTrue('demande à faire' in mail.outbox[0].body)\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n self.assertEqual(16, Member.objects.all().count())\n\n def test_join_community_auto_accept(self):\n \"\"\"\n Ensure an authenticated user can join a community\n \"\"\"\n url = '/api/v1/communities/2/join_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEquals(status.HTTP_201_CREATED, response.status_code)\n\n self.assertEqual(16, Member.objects.all().count())\n member = Member.objects.get(user=self.user_model.objects.get(id=4))\n community = Community.objects.get(id=2)\n self.assertEqual(community, member.community)\n self.assertEqual(\"2\", member.role)\n self.assertEqual(\"1\", member.status)\n\n self.assertEqual(1, Notification.objects.count())\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[SmarTribe] Nouveau membre')\n self.assertTrue('fait désormais' in mail.outbox[0].body)\n\n def test_leave_community(self):\n \"\"\"\n Ensure a member can leave a community\n \"\"\"\n url = '/api/v1/communities/3/leave_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code)\n\n self.assertEqual(14, Member.objects.all().count())\n\n def test_leave_community_banned(self):\n \"\"\"\n Ensure a banned member cannot leave a community\n \"\"\"\n url = '/api/v1/communities/5/leave_community/'\n\n response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEquals(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n self.assertEqual(15, Member.objects.all().count())\n\n def test_list_my_memberships_without_auth(self):\n \"\"\"\n Ensure an 
unauthenticated user cannot list memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_my_memberships_member(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual(5, data['results'][2]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('2', data['results'][2]['status'])\n self.assertEqual('2', data['results'][0]['role'])\n self.assertEqual('2', data['results'][1]['role'])\n self.assertEqual('2', data['results'][2]['role'])\n\n def test_list_my_memberships_moderator(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])\n\n def test_list_my_memberships_owner(self):\n \"\"\"\n Ensure a user can list all his memberships\n \"\"\"\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(10, data['count'])\n\n def test_list_members_without_auth(self):\n \"\"\"\n Ensure non authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url)\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_member_rights(self):\n \"\"\"\n Ensure a non-member authenticated user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_without_mod_rights(self):\n \"\"\"\n Ensure a simple user cannot list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights_not_accepted(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = '/api/v1/communities/3/retrieve_members/'\n\n # Test before acceptation\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_list_members_with_mod_rights(self):\n \"\"\"\n Ensure a moderator can list community members\n \"\"\"\n url = 
'/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n\n self.assertEqual(6, data['results'][0]['id'])\n self.assertEqual(1, data['results'][0]['user']['id'])\n self.assertEqual('0', data['results'][0]['role'])\n self.assertEqual('1', data['results'][0]['status'])\n\n self.assertEqual(7, data['results'][1]['id'])\n self.assertEqual(2, data['results'][1]['user']['id'])\n self.assertEqual('1', data['results'][1]['role'])\n self.assertEqual('1', data['results'][1]['status'])\n\n self.assertEqual(8, data['results'][2]['id'])\n self.assertEqual(3, data['results'][2]['user']['id'])\n self.assertEqual('2', data['results'][2]['role'])\n self.assertEqual('1', data['results'][2]['status'])\n\n def test_list_members_with_owner_rights(self):\n \"\"\"\n Ensure an owner can list community members\n \"\"\"\n url = '/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])\n\n def test_accept_member_without_auth(self):\n \"\"\"\n Ensure a non authenticated user can not accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_simple_member(self):\n \"\"\"\n Ensure a simple member cannot accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_accept_member_with_owner(self):\n \"\"\"\n Ensure an owner can accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_accept_member_with_owner_bad_request(self):\n \"\"\"\n Ensure accept_member request data format\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_owner_not_found(self):\n \"\"\"\n Ensure member exists\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 19\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n\n def test_accept_member_with_not_accepted_moderator(self):\n \"\"\"\n Ensure an non accepted moderator cannot accept members\n \"\"\"\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, 
response.status_code)\n\n def test_accept_member_with_moderator(self):\n \"\"\"\n Ensure an moderator can accept members\n \"\"\"\n mod = Member.objects.get(id=4)\n mod.status = '1'\n mod.save()\n\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')\n\n def test_ban_member_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_non_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_moderator_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 7\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_owner_with_member(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 6\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_ban_member_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_member_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('2', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership cancelled')\n\n def test_ban_owner_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/ban_member/'\n data = {\n 'id': 6\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_without_auth(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_user(self):\n \"\"\"\n\n \"\"\"\n url = 
'/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_moderator(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)\n\n def test_promote_user_with_owner(self):\n \"\"\"\n\n \"\"\"\n url = '/api/v1/communities/4/promote_moderator/'\n data = {\n 'id': 8\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(8, data['id'])\n self.assertEqual('1', data['role'])\n",
"step-ids": [
23,
29,
33,
36,
38
]
}
|
[
23,
29,
33,
36,
38
] |
import turtle
import random
shaan = turtle.Turtle()
#shaan.color(50,50,50)
#shaan.begin_fill()
for i in range(2):  # two passes trace a 200 x 250 rectangle
shaan.forward(200)
shaan.right(90)
shaan.forward(250)
shaan.right(90)
shaan.left(60)
for i in range(4):  # triangle with 200-unit sides; the fourth pass retraces an edge
shaan.forward(200)
shaan.right(120)
shaan.forward(100)
shaan.left(150)
shaan.forward(100)
shaan.right(90)
shaan.forward(20)
shaan.right(90)
shaan.forward(135)
shaan.left(30)
shaan.forward(60)
shaan.right(120)
shaan.forward(32.5)
shaan.pu()
shaan.left(90)
shaan.forward(60)
shaan.pd()
for i in range(4):  # 25 x 25 square
shaan.forward(25)
shaan.right(90)
shaan.forward(25)
shaan.right(90)
shaan.forward(25)
shaan.pu()
shaan.forward(30)
shaan.pd()
for i in range(4):  # 25 x 25 square
shaan.forward(25)
shaan.right(90)
shaan.forward(25)
shaan.pu()
shaan.forward(30)
shaan.pd()
for i in range(4):  # 25 x 25 square
shaan.forward(25)
shaan.right(90)
shaan.forward(25)
shaan.pu()
shaan.forward(32.5)
shaan.pd()
shaan.left(90)
shaan.forward(165)
shaan.left(90)
shaan.forward(100)
shaan.left(90)
shaan.forward(100)
shaan.right(90)
shaan.forward(50)
shaan.right(90)
shaan.forward(100)
shaan.right(180)
shaan.forward(75)
shaan.pu()
shaan.left(90)
shaan.forward(20)
shaan.pd()
shaan.begin_fill()  # small filled circle, radius 5
shaan.circle(5,360)
shaan.end_fill()
shaan.pu()
shaan.forward(1000)
turtle.done()
#shaan.end_fill()
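The three window loops above repeat the same four-line square pattern; a small helper would remove the duplication. A sketch only (draw_square is an illustrative name, not part of the original script):

def draw_square(t, size):
    # trace one size x size square, ending back at the starting pose
    for _ in range(4):
        t.forward(size)
        t.right(90)

# each 25 x 25 window loop above then collapses to: draw_square(shaan, 25)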
|
normal
|
{
"blob_id": "6f13ebe7355d530ba3403aab54b313ecf35b1261",
"index": 4523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2):\n shaan.forward(200)\n shaan.right(90)\n shaan.forward(250)\n shaan.right(90)\nshaan.left(60)\nfor i in range(4):\n shaan.forward(200)\n shaan.right(120)\nshaan.forward(100)\nshaan.left(150)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(20)\nshaan.right(90)\nshaan.forward(135)\nshaan.left(30)\nshaan.forward(60)\nshaan.right(120)\nshaan.forward(32.5)\nshaan.pu()\nshaan.left(90)\nshaan.forward(60)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(32.5)\nshaan.pd()\nshaan.left(90)\nshaan.forward(165)\nshaan.left(90)\nshaan.forward(100)\nshaan.left(90)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(50)\nshaan.right(90)\nshaan.forward(100)\nshaan.right(180)\nshaan.forward(75)\nshaan.pu()\nshaan.left(90)\nshaan.forward(20)\nshaan.pd()\nshaan.begin_fill()\nshaan.circle(5, 360)\nshaan.end_fill()\nshaan.pu()\nshaan.forward(1000)\nturtle.done()\n",
"step-3": "<mask token>\nshaan = turtle.Turtle()\nfor i in range(2):\n shaan.forward(200)\n shaan.right(90)\n shaan.forward(250)\n shaan.right(90)\nshaan.left(60)\nfor i in range(4):\n shaan.forward(200)\n shaan.right(120)\nshaan.forward(100)\nshaan.left(150)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(20)\nshaan.right(90)\nshaan.forward(135)\nshaan.left(30)\nshaan.forward(60)\nshaan.right(120)\nshaan.forward(32.5)\nshaan.pu()\nshaan.left(90)\nshaan.forward(60)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(32.5)\nshaan.pd()\nshaan.left(90)\nshaan.forward(165)\nshaan.left(90)\nshaan.forward(100)\nshaan.left(90)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(50)\nshaan.right(90)\nshaan.forward(100)\nshaan.right(180)\nshaan.forward(75)\nshaan.pu()\nshaan.left(90)\nshaan.forward(20)\nshaan.pd()\nshaan.begin_fill()\nshaan.circle(5, 360)\nshaan.end_fill()\nshaan.pu()\nshaan.forward(1000)\nturtle.done()\n",
"step-4": "import turtle\nimport random\nshaan = turtle.Turtle()\nfor i in range(2):\n shaan.forward(200)\n shaan.right(90)\n shaan.forward(250)\n shaan.right(90)\nshaan.left(60)\nfor i in range(4):\n shaan.forward(200)\n shaan.right(120)\nshaan.forward(100)\nshaan.left(150)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(20)\nshaan.right(90)\nshaan.forward(135)\nshaan.left(30)\nshaan.forward(60)\nshaan.right(120)\nshaan.forward(32.5)\nshaan.pu()\nshaan.left(90)\nshaan.forward(60)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\nfor i in range(4):\n shaan.forward(25)\n shaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(32.5)\nshaan.pd()\nshaan.left(90)\nshaan.forward(165)\nshaan.left(90)\nshaan.forward(100)\nshaan.left(90)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(50)\nshaan.right(90)\nshaan.forward(100)\nshaan.right(180)\nshaan.forward(75)\nshaan.pu()\nshaan.left(90)\nshaan.forward(20)\nshaan.pd()\nshaan.begin_fill()\nshaan.circle(5, 360)\nshaan.end_fill()\nshaan.pu()\nshaan.forward(1000)\nturtle.done()\n",
"step-5": "import turtle\nimport random\n\nshaan = turtle.Turtle()\n\n#shaan.color(50,50,50)\n\n#shaan.begin_fill()\nfor i in range (2):\n\tshaan.forward(200)\n\tshaan.right(90)\n\tshaan.forward(250)\n\tshaan.right(90)\n\nshaan.left(60)\n\nfor i in range(4):\n\tshaan.forward(200)\n\tshaan.right(120)\n\nshaan.forward(100)\nshaan.left(150)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(20)\nshaan.right(90)\nshaan.forward(135)\nshaan.left(30)\nshaan.forward(60)\n\nshaan.right(120)\nshaan.forward(32.5)\nshaan.pu()\nshaan.left(90)\nshaan.forward(60)\nshaan.pd()\n\n\nfor i in range(4):\n\tshaan.forward(25)\n\tshaan.right(90)\n\nshaan.forward(25)\nshaan.right(90)\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\n\n\nfor i in range(4):\n\tshaan.forward(25)\n\tshaan.right(90)\n\nshaan.forward(25)\nshaan.pu()\nshaan.forward(30)\nshaan.pd()\n\nfor i in range(4):\n\tshaan.forward(25)\n\tshaan.right(90)\n\nshaan.forward(25)\nshaan.pu()\nshaan.forward(32.5)\nshaan.pd()\n\n\nshaan.left(90)\nshaan.forward(165)\nshaan.left(90)\nshaan.forward(100)\nshaan.left(90)\nshaan.forward(100)\nshaan.right(90)\nshaan.forward(50)\nshaan.right(90)\nshaan.forward(100)\nshaan.right(180)\nshaan.forward(75)\nshaan.pu()\nshaan.left(90)\nshaan.forward(20)\nshaan.pd()\nshaan.begin_fill()\nshaan.circle(5,360)\nshaan.end_fill()\nshaan.pu()\nshaan.forward(1000)\n\nturtle.done()\n\n\n\n\n\n\n\n\n\n\n#shaan.end_fill()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class CountedList(UserList):
def Count(self):
self.cnt = Counter(self.data)
return self.cnt
def append(self, item):
super(CountedList, self).append(item)
global y
y = self.Count()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CountedList(UserList):
def Count(self):
self.cnt = Counter(self.data)
return self.cnt
def append(self, item):
super(CountedList, self).append(item)
global y
y = self.Count()
<|reserved_special_token_0|>
print(y)
countedlist.append('3')
print(y)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CountedList(UserList):
def Count(self):
self.cnt = Counter(self.data)
return self.cnt
def append(self, item):
super(CountedList, self).append(item)
global y
y = self.Count()
countedlist = CountedList(['1', '2', '3'])
y = countedlist.Count()
print(y)
countedlist.append('3')
print(y)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from collections import UserList, Counter
class CountedList(UserList):
def Count(self):
self.cnt = Counter(self.data)
return self.cnt
def append(self, item):
super(CountedList, self).append(item)
global y
y = self.Count()
countedlist = CountedList(['1', '2', '3'])
y = countedlist.Count()
print(y)
countedlist.append('3')
print(y)
<|reserved_special_token_1|>
"""
Counted List
Create a class for a list-like object based on the UserList wrapper
https://docs.python.org/3/library/collections.html#collections.UserList
That object should have a method to return a Counter
https://docs.python.org/3/library/collections.html#collections.Counter
for all objects in the list
Counter should be updated automatically for at least 2 methods (append, pop)
"""
# example to test code
# class Example(UserList)
# ...
#
# x = Example(['1', '2', '3'])
# y = x.get_counter() # y contains Counter({'1':1, '2':1 '3':1})
# x.append(3)
# now y contains Counter({'1':1, '2':1 '3':2})
from collections import UserList, Counter
class CountedList(UserList):
    def Count(self):
        # rebuild the Counter from the list's current contents
        self.cnt = Counter(self.data)
        return self.cnt
    def append(self, item):
        super(CountedList, self).append(item)
        # refresh the shared counter after every append
        global y
        y = self.Count()
countedlist = CountedList(['1', '2', '3'])
y = countedlist.Count()
print(y)
countedlist.append('3')
print(y)
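The brief above asks for the Counter to stay in sync for at least two methods (append, pop), but only append is overridden. A sketch of the missing half in the same global-counter style (CountedListWithPop is an illustrative name, not from the original):

class CountedListWithPop(CountedList):
    def pop(self, index=-1):
        # remove and return the item, then refresh the shared counter
        item = super(CountedListWithPop, self).pop(index)
        global y
        y = self.Count()
        return item

countedlist2 = CountedListWithPop(['1', '2', '3'])
y = countedlist2.Count()
countedlist2.pop()
print(y)  # Counter({'1': 1, '2': 1})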
|
flexible
|
{
"blob_id": "1cf4fc37e030a895cb36f537ce9e92df34acfb8b",
"index": 7659,
"step-1": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\n<mask token>\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-3": "<mask token>\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\ncountedlist = CountedList(['1', '2', '3'])\ny = countedlist.Count()\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-4": "<mask token>\nfrom collections import UserList, Counter\n\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt = Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList, self).append(item)\n global y\n y = self.Count()\n\n\ncountedlist = CountedList(['1', '2', '3'])\ny = countedlist.Count()\nprint(y)\ncountedlist.append('3')\nprint(y)\n",
"step-5": "\"\"\"\nCounted List\nCreate a class for an list like object based on UserList wrapper\nhttps://docs.python.org/3/library/collections.html#collections.UserList\nThat object should have a method to return a Counter\nhttps://docs.python.org/3/library/collections.html#collections.Counter\nfor all objects in the list\nCounter should be updated automatically for at lest 2 methods (append, pop)\n\"\"\"\n\n# example to test code\n# class Example(UserList)\n# ...\n#\n# x = Example(['1', '2', '3'])\n# y = x.get_counter() # y contains Counter({'1':1, '2':1 '3':1})\n# x.append(3)\n# now y contains Counter({'1':1, '2':1 '3':2})\n\nfrom collections import UserList,Counter\n\nclass CountedList(UserList):\n\n def Count(self):\n self.cnt=Counter(self.data)\n return self.cnt\n\n def append(self, item):\n super(CountedList,self).append(item)\n global y\n y = self.Count()\n\ncountedlist=CountedList(['1', '2', '3'])\ny=countedlist.Count()\nprint(y)\n\ncountedlist.append('3')\nprint(y)\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sys
sys.setrecursionlimit(1000000)
n, q = map(int, input().split())
graph = [set([]) for _ in range(n)]
for _ in range(n - 1):
a, b = map(int, input().split())
graph[a - 1].add(b - 1)
graph[b - 1].add(a - 1)
# 2-color the tree by depth parity from node 0: neighbours of a node in
# `odd` are put into `even`, and vice versa
def dfs(i):
if temp[i]:
return
temp[i] = True
if i in odd:
for j in graph[i]:
even.add(j)
dfs(j)
else:
for j in graph[i]:
odd.add(j)
dfs(j)
temp = [False] * n
odd = set([])
even = set([])
odd.add(0)
dfs(0)
ans = []
for i in range(q):
c, d = map(int, input().split())
    # equal depth parity => the path between c and d has even length => "Town"
    if (c - 1 in odd and d - 1 in odd) or (c - 1 in even and d - 1 in even):
ans.append("Town")
else:
ans.append("Road")
for i in ans:
print(i)
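The recursive dfs above needs the raised recursion limit to survive deep trees; an explicit stack avoids the limit entirely. A sketch reusing the same graph, temp, odd and even structures after odd.add(0) (dfs_iterative is an illustrative name, not from the original):

def dfs_iterative(start):
    # same depth-parity 2-coloring as dfs(), without call-stack depth limits
    stack = [start]
    while stack:
        i = stack.pop()
        if temp[i]:
            continue
        temp[i] = True
        target = even if i in odd else odd
        for j in graph[i]:
            target.add(j)
            if not temp[j]:
                stack.append(j)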
|
normal
|
{
"blob_id": "bab6b9a0178da119f753deb6c626dd5c41db2bdd",
"index": 2004,
"step-1": "<mask token>\n\n\ndef dfs(i):\n if temp[i]:\n return\n temp[i] = True\n if i in odd:\n for j in graph[i]:\n even.add(j)\n dfs(j)\n else:\n for j in graph[i]:\n odd.add(j)\n dfs(j)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.setrecursionlimit(1000000)\n<mask token>\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n graph[a - 1].add(b - 1)\n graph[b - 1].add(a - 1)\n\n\ndef dfs(i):\n if temp[i]:\n return\n temp[i] = True\n if i in odd:\n for j in graph[i]:\n even.add(j)\n dfs(j)\n else:\n for j in graph[i]:\n odd.add(j)\n dfs(j)\n\n\n<mask token>\nodd.add(0)\ndfs(0)\n<mask token>\nfor i in range(q):\n c, d = map(int, input().split())\n if c - 1 in odd and d - 1 in odd or c - 1 in even and d - 1 in even:\n ans.append('Town')\n else:\n ans.append('Road')\nfor i in ans:\n print(i)\n",
"step-3": "<mask token>\nsys.setrecursionlimit(1000000)\nn, q = map(int, input().split())\ngraph = [set([]) for _ in range(n)]\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n graph[a - 1].add(b - 1)\n graph[b - 1].add(a - 1)\n\n\ndef dfs(i):\n if temp[i]:\n return\n temp[i] = True\n if i in odd:\n for j in graph[i]:\n even.add(j)\n dfs(j)\n else:\n for j in graph[i]:\n odd.add(j)\n dfs(j)\n\n\ntemp = [False] * n\nodd = set([])\neven = set([])\nodd.add(0)\ndfs(0)\nans = []\nfor i in range(q):\n c, d = map(int, input().split())\n if c - 1 in odd and d - 1 in odd or c - 1 in even and d - 1 in even:\n ans.append('Town')\n else:\n ans.append('Road')\nfor i in ans:\n print(i)\n",
"step-4": "import sys\nsys.setrecursionlimit(1000000)\nn, q = map(int, input().split())\ngraph = [set([]) for _ in range(n)]\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n graph[a - 1].add(b - 1)\n graph[b - 1].add(a - 1)\n\n\ndef dfs(i):\n if temp[i]:\n return\n temp[i] = True\n if i in odd:\n for j in graph[i]:\n even.add(j)\n dfs(j)\n else:\n for j in graph[i]:\n odd.add(j)\n dfs(j)\n\n\ntemp = [False] * n\nodd = set([])\neven = set([])\nodd.add(0)\ndfs(0)\nans = []\nfor i in range(q):\n c, d = map(int, input().split())\n if c - 1 in odd and d - 1 in odd or c - 1 in even and d - 1 in even:\n ans.append('Town')\n else:\n ans.append('Road')\nfor i in ans:\n print(i)\n",
"step-5": "import sys\n\nsys.setrecursionlimit(1000000)\nn, q = map(int, input().split())\ngraph = [set([]) for _ in range(n)]\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n graph[a - 1].add(b - 1)\n graph[b - 1].add(a - 1)\n\n\ndef dfs(i):\n if temp[i]:\n return\n temp[i] = True\n if i in odd:\n for j in graph[i]:\n even.add(j)\n dfs(j)\n else:\n for j in graph[i]:\n odd.add(j)\n dfs(j)\n\n\ntemp = [False] * n\nodd = set([])\neven = set([])\nodd.add(0)\ndfs(0)\nans = []\nfor i in range(q):\n c, d = map(int, input().split())\n if (c - 1 in odd and d - 1 in odd) or (c - 1 in even and d - 1 in even):\n ans.append(\"Town\")\n else:\n ans.append(\"Road\")\nfor i in ans:\n print(i)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class SaveConfigFile:
<|reserved_special_token_0|>
def __init__(self, fileName='../conf/main.ini'):
self.config = ConfigParser.ConfigParser()
self.fileName = fileName
def saveConfigFile(self, configMainName, configSubName, value):
"""
        :param missionId: the id of the mission to save (int or string)
:return:
"""
try:
if configMainName is None or configSubName is None:
return None
self.config.read(self.fileName)
self.config.set(configMainName, configSubName, value)
self.config.write(open(self.fileName, 'r+'))
if DEBUG and SYSTEM_TOOLS_DEBUG:
print(
'{SYS}{MISSION_DEBUG} config has been save in file successfully'
)
except Exception as e:
wrongFile = open('data/wrongMessage.dat', 'a+')
currentTime = str(datetime.datetime.strptime(time.strftime(
'%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))
wrongMessage = {'|currentTime': currentTime, '|file':
'SystemTools-ConfFileRead-saveConfigFile',
'|configMainName': configMainName, '|configSubName':
configSubName, '|value': value, '|wrongMessage': str(e)}
wrongFile.write(str(wrongMessage))
wrongFile.write('\n')
wrongFile.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SaveConfigFile:
"""
    This class is responsible for saving the configuration file; it performs the actual file operations.
"""
def __init__(self, fileName='../conf/main.ini'):
self.config = ConfigParser.ConfigParser()
self.fileName = fileName
def saveConfigFile(self, configMainName, configSubName, value):
"""
        :param missionId: the id of the mission to save (int or string)
:return:
"""
try:
if configMainName is None or configSubName is None:
return None
self.config.read(self.fileName)
self.config.set(configMainName, configSubName, value)
self.config.write(open(self.fileName, 'r+'))
if DEBUG and SYSTEM_TOOLS_DEBUG:
print(
'{SYS}{MISSION_DEBUG} config has been save in file successfully'
)
except Exception as e:
wrongFile = open('data/wrongMessage.dat', 'a+')
currentTime = str(datetime.datetime.strptime(time.strftime(
'%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))
wrongMessage = {'|currentTime': currentTime, '|file':
'SystemTools-ConfFileRead-saveConfigFile',
'|configMainName': configMainName, '|configSubName':
configSubName, '|value': value, '|wrongMessage': str(e)}
wrongFile.write(str(wrongMessage))
wrongFile.write('\n')
wrongFile.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SaveConfigFile:
"""
    This class is responsible for saving the configuration file; it performs the actual file operations.
"""
def __init__(self, fileName='../conf/main.ini'):
self.config = ConfigParser.ConfigParser()
self.fileName = fileName
def saveConfigFile(self, configMainName, configSubName, value):
"""
        :param missionId: the id of the mission to save (int or string)
:return:
"""
try:
if configMainName is None or configSubName is None:
return None
self.config.read(self.fileName)
self.config.set(configMainName, configSubName, value)
self.config.write(open(self.fileName, 'r+'))
if DEBUG and SYSTEM_TOOLS_DEBUG:
print(
'{SYS}{MISSION_DEBUG} config has been save in file successfully'
)
except Exception as e:
wrongFile = open('data/wrongMessage.dat', 'a+')
currentTime = str(datetime.datetime.strptime(time.strftime(
'%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))
wrongMessage = {'|currentTime': currentTime, '|file':
'SystemTools-ConfFileRead-saveConfigFile',
'|configMainName': configMainName, '|configSubName':
configSubName, '|value': value, '|wrongMessage': str(e)}
wrongFile.write(str(wrongMessage))
wrongFile.write('\n')
wrongFile.close()
if __name__ == '__main__':
s = SaveConfigFile(fileName=
'F:\\python17\\pythonPro\\MemortAssit\\conf\\main.ini')
print(s.saveConfigFile('VERSION', 'version', 'v1.0'))
<|reserved_special_token_1|>
from src.Client.Conf.config import *
class SaveConfigFile:
"""
    This class is responsible for saving the configuration file; it performs the actual file operations.
"""
def __init__(self, fileName='../conf/main.ini'):
self.config = ConfigParser.ConfigParser()
self.fileName = fileName
def saveConfigFile(self, configMainName, configSubName, value):
"""
        :param missionId: the id of the mission to save (int or string)
:return:
"""
try:
if configMainName is None or configSubName is None:
return None
self.config.read(self.fileName)
self.config.set(configMainName, configSubName, value)
self.config.write(open(self.fileName, 'r+'))
if DEBUG and SYSTEM_TOOLS_DEBUG:
print(
'{SYS}{MISSION_DEBUG} config has been save in file successfully'
)
except Exception as e:
wrongFile = open('data/wrongMessage.dat', 'a+')
currentTime = str(datetime.datetime.strptime(time.strftime(
'%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))
wrongMessage = {'|currentTime': currentTime, '|file':
'SystemTools-ConfFileRead-saveConfigFile',
'|configMainName': configMainName, '|configSubName':
configSubName, '|value': value, '|wrongMessage': str(e)}
wrongFile.write(str(wrongMessage))
wrongFile.write('\n')
wrongFile.close()
if __name__ == '__main__':
s = SaveConfigFile(fileName=
'F:\\python17\\pythonPro\\MemortAssit\\conf\\main.ini')
print(s.saveConfigFile('VERSION', 'version', 'v1.0'))
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
from src.Client.Conf.config import *
class SaveConfigFile():
"""
    This class is responsible for saving the configuration file; it performs the actual file operations.
"""
def __init__(self, fileName='../conf/main.ini'):
self.config = ConfigParser.ConfigParser()
self.fileName = fileName
def saveConfigFile(self, configMainName, configSubName, value):
"""
        :param missionId: the id of the mission to save (int or string)
:return:
"""
try:
            # Defensive programming: if value is not a string, convert it here
if configMainName is None or configSubName is None:
return None
            # Write back to the config file
self.config.read(self.fileName)
self.config.set(configMainName, configSubName, value)
self.config.write(open(self.fileName, "r+"))
            # Print a debug log message
if DEBUG and SYSTEM_TOOLS_DEBUG:
print('{SYS}{MISSION_DEBUG} config has been save in file successfully')
except Exception as e:
            # Open the error log file
wrongFile = open('data/wrongMessage.dat', 'a+')
            # Get the current time
currentTime = str(
datetime.datetime.strptime(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()), '%Y-%m-%d-%H-%M-%S'))
            # Build the error message for this failure
wrongMessage = {
'|currentTime': currentTime,
'|file': 'SystemTools-ConfFileRead-saveConfigFile',
'|configMainName': configMainName,
'|configSubName': configSubName,
'|value': value,
'|wrongMessage': str(e)
}
            # Write it to the file
wrongFile.write(str(wrongMessage))
            # Append a newline
wrongFile.write('\n')
wrongFile.close()
# Manual test of config file saving
if __name__ == '__main__':
    s = SaveConfigFile(fileName=r'F:\python17\pythonPro\MemortAssit\conf\main.ini')
print(s.saveConfigFile('VERSION', 'version', 'v1.0'))
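One detail worth noting: self.config.write(open(self.fileName, "r+")) never closes the file handle. A minimal sketch of that single write step using a context manager (keeping the original 'r+' mode; 'w' would be the choice if truncating stale content matters):

# inside saveConfigFile, in place of the one-line write:
with open(self.fileName, 'r+') as configFile:
    self.config.write(configFile)  # handle is closed when the block exits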
|
flexible
|
{
"blob_id": "b61bb47f3e059c607447cea92ce1712825735822",
"index": 2373,
"step-1": "<mask token>\n\n\nclass SaveConfigFile:\n <mask token>\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SaveConfigFile:\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SaveConfigFile:\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\nif __name__ == '__main__':\n s = SaveConfigFile(fileName=\n 'F:\\\\python17\\\\pythonPro\\\\MemortAssit\\\\conf\\\\main.ini')\n print(s.saveConfigFile('VERSION', 'version', 'v1.0'))\n",
"step-4": "from src.Client.Conf.config import *\n\n\nclass SaveConfigFile:\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n if configMainName is None or configSubName is None:\n return None\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, 'r+'))\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print(\n '{SYS}{MISSION_DEBUG} config has been save in file successfully'\n )\n except Exception as e:\n wrongFile = open('data/wrongMessage.dat', 'a+')\n currentTime = str(datetime.datetime.strptime(time.strftime(\n '%Y-%m-%d-%H-%M-%S', time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n wrongMessage = {'|currentTime': currentTime, '|file':\n 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName, '|configSubName':\n configSubName, '|value': value, '|wrongMessage': str(e)}\n wrongFile.write(str(wrongMessage))\n wrongFile.write('\\n')\n wrongFile.close()\n\n\nif __name__ == '__main__':\n s = SaveConfigFile(fileName=\n 'F:\\\\python17\\\\pythonPro\\\\MemortAssit\\\\conf\\\\main.ini')\n print(s.saveConfigFile('VERSION', 'version', 'v1.0'))\n",
"step-5": "# -*- coding:utf-8 -*-\n\n\nfrom src.Client.Conf.config import *\n\n\nclass SaveConfigFile():\n \"\"\"\n 该类负责保存配置文件,属于实际操作类\n \"\"\"\n\n def __init__(self, fileName='../conf/main.ini'):\n self.config = ConfigParser.ConfigParser()\n self.fileName = fileName\n\n def saveConfigFile(self, configMainName, configSubName, value):\n \"\"\"\n\n :param missionId: 需要保存的任务id (int 或者 string)\n :return:\n \"\"\"\n try:\n # 防御编程 若value不是string,转换则在这转换\n if configMainName is None or configSubName is None:\n return None\n # 写回配置文件\n self.config.read(self.fileName)\n self.config.set(configMainName, configSubName, value)\n self.config.write(open(self.fileName, \"r+\"))\n # 打印debug日志\n if DEBUG and SYSTEM_TOOLS_DEBUG:\n print('{SYS}{MISSION_DEBUG} config has been save in file successfully')\n except Exception as e:\n # 打开错误日志文件\n wrongFile = open('data/wrongMessage.dat', 'a+')\n # 获取当前时间\n currentTime = str(\n datetime.datetime.strptime(time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()), '%Y-%m-%d-%H-%M-%S'))\n # 生成报错的错误信息\n wrongMessage = {\n '|currentTime': currentTime,\n '|file': 'SystemTools-ConfFileRead-saveConfigFile',\n '|configMainName': configMainName,\n '|configSubName': configSubName,\n '|value': value,\n '|wrongMessage': str(e)\n }\n # 存入文件\n wrongFile.write(str(wrongMessage))\n # 增加换行符\n wrongFile.write('\\n')\n wrongFile.close()\n\n\n# 配置文件读取测试\nif __name__ == '__main__':\n s = SaveConfigFile(fileName='F:\\python17\\pythonPro\\MemortAssit\\conf\\main.ini')\n print(s.saveConfigFile('VERSION', 'version', 'v1.0'))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Plot funcs
Jan, 2018 Rose Yu @Caltech
"""
import matplotlib.pyplot as plt
import seaborn as sns
from util.matutil import *
from util.batchutil import *
def plot_img(data, dec_mean, model):
"""
plot ground truth (left) and reconstruction (right)
showing b/w image data of mnist
"""
plt.subplot(121)
plt.imshow(data.data.numpy()[0,].squeeze())
plt.subplot(122)
plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())
plt.show()
plt.pause(1e-6)
plt.gcf().clear()
sample = model.sample_z(data)
plt.imshow(sample)
def plot_kde(data, dec_mean):
"""
plot the kernel density estimation for 2d distributions
"""
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)
sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color="r", shade=True, ax=ax1)
sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color="b", shade=True, ax=ax2)
plt.show()
plt.pause(1e-6)
plt.gcf().clear()
def plot_ts(data, enc_mean, dec_mean):
"""
plot time series with uncertainty
"""
# enc_mean, enc_cov = enc
# dec_mean, dec_cov = dec
batch_size = data.size()[0]
D = 2
N = int(data.size()[1]/D)
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)
# plot data
plt.axes(ax1)
ax1.set_ylim(-0.1,0.1)
sns.tsplot(data.view(batch_size,N,-1).data.numpy())
# plot reconstruction
plt.axes(ax2)
ax2.set_ylim(-0.1,0.1)
sns.tsplot(dec_mean.view(batch_size,N,-1).data.numpy())
plt.axes(ax3)
sample_Sigma = bivech2(enc_mean.view(batch_size,N,-1))
sample_vechSigma = bvech(sample_Sigma).data.numpy()
sns.tsplot(sample_vechSigma)
# plot latent variables
# sample_Sigma = ivech2x(enc_cov.data.numpy())
# sample_vechSigma = vechx(sample_Sigma.reshape((-1,N,N)))
# sns.tsplot(sample_vechSigma)
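# Minimal usage sketch (an illustrative assumption, not part of the original module):
# plot_kde only needs tensors exposing .data.numpy(), so toy torch tensors suffice.
if __name__ == '__main__':
    import torch
    toy_data = torch.randn(500, 2)                       # stand-in ground-truth samples
    toy_dec_mean = toy_data + 0.1 * torch.randn(500, 2)  # stand-in reconstructions
    plot_kde(toy_data, toy_dec_mean)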
|
normal
|
{
"blob_id": "cca9d91fe20e58f233ccfc4100edb748356ed234",
"index": 6311,
"step-1": "<mask token>\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:, 0], data.data.numpy()[:, 1], color='r',\n shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:, 0], dec_mean.data.numpy()[:, 1],\n color='b', shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:, 0], data.data.numpy()[:, 1], color='r',\n shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:, 0], dec_mean.data.numpy()[:, 1],\n color='b', shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n\n\ndef plot_ts(data, enc_mean, dec_mean):\n \"\"\"\n plot time series with uncertainty\n \"\"\"\n batch_size = data.size()[0]\n D = 2\n N = int(data.size()[1] / D)\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)\n plt.axes(ax1)\n ax1.set_ylim(-0.1, 0.1)\n sns.tsplot(data.view(batch_size, N, -1).data.numpy())\n plt.axes(ax2)\n ax2.set_ylim(-0.1, 0.1)\n sns.tsplot(dec_mean.view(batch_size, N, -1).data.numpy())\n plt.axes(ax3)\n sample_Sigma = bivech2(enc_mean.view(batch_size, N, -1))\n sample_vechSigma = bvech(sample_Sigma).data.numpy()\n sns.tsplot(sample_vechSigma)\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom util.matutil import *\nfrom util.batchutil import *\n\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1, 28, 28).data.numpy()[0,].squeeze())\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n sample = model.sample_z(data)\n plt.imshow(sample)\n\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:, 0], data.data.numpy()[:, 1], color='r',\n shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:, 0], dec_mean.data.numpy()[:, 1],\n color='b', shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-06)\n plt.gcf().clear()\n\n\ndef plot_ts(data, enc_mean, dec_mean):\n \"\"\"\n plot time series with uncertainty\n \"\"\"\n batch_size = data.size()[0]\n D = 2\n N = int(data.size()[1] / D)\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)\n plt.axes(ax1)\n ax1.set_ylim(-0.1, 0.1)\n sns.tsplot(data.view(batch_size, N, -1).data.numpy())\n plt.axes(ax2)\n ax2.set_ylim(-0.1, 0.1)\n sns.tsplot(dec_mean.view(batch_size, N, -1).data.numpy())\n plt.axes(ax3)\n sample_Sigma = bivech2(enc_mean.view(batch_size, N, -1))\n sample_vechSigma = bvech(sample_Sigma).data.numpy()\n sns.tsplot(sample_vechSigma)\n",
"step-5": "\"\"\"\nPlot funcs \nJan, 2018 Rose Yu @Caltech \n\"\"\"\nimport matplotlib.pyplot as plt \nimport seaborn as sns\nfrom util.matutil import *\nfrom util.batchutil import *\n\ndef plot_img():\n \"\"\" \n plot ground truth (left) and reconstruction (right)\n showing b/w image data of mnist\n \"\"\"\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)\n\ndef plot_kde():\n \"\"\"\n plot the kernel density estimation for 2d distributions\n \"\"\"\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n\ndef plot_ts(data, enc_mean, dec_mean):\n \"\"\"\n plot time series with uncertainty\n \"\"\"\n # enc_mean, enc_cov = enc\n # dec_mean, dec_cov = dec\n\n batch_size = data.size()[0]\n D = 2\n N = int(data.size()[1]/D)\n\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)\n # plot data\n plt.axes(ax1)\n ax1.set_ylim(-0.1,0.1)\n\n sns.tsplot(data.view(batch_size,N,-1).data.numpy())\n\n # plot reconstruction\n plt.axes(ax2)\n ax2.set_ylim(-0.1,0.1)\n sns.tsplot(dec_mean.view(batch_size,N,-1).data.numpy())\n\n plt.axes(ax3)\n sample_Sigma = bivech2(enc_mean.view(batch_size,N,-1))\n sample_vechSigma = bvech(sample_Sigma).data.numpy()\n \n sns.tsplot(sample_vechSigma)\n\n # plot latent variables\n # sample_Sigma = ivech2x(enc_cov.data.numpy())\n # sample_vechSigma = vechx(sample_Sigma.reshape((-1,N,N)))\n # sns.tsplot(sample_vechSigma)\n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import gdal
import sys, gc
sys.path.append("..")
import getopt
import redis
import time
import subprocess
import numpy as np
from os import listdir, makedirs
from os.path import isfile, join, exists
from operator import itemgetter
from natsort import natsorted
from config import DatasetConfig, RasterParams
from dataset.threads.band_analyzer_thread import BandAnalyzerThread
from dataset.threads.sample_selector_thread_status import SampleSelectorThreadStatus
TACTIC_UPSAMPLE = 'upsample'
TACTIC_DOWNSAMPLE = 'downsample'
TACTIC_NONE = 'none'
OPERATION_CREATE = 1000
OPERATION_ANALYZE = 2000
OPERATION_FULLANALYZE = 2500
OPERATION_MIX = 3000
OPERATION_SUMMARIZE = 4000
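# Operation codes select which pipeline stages run; OPERATION_MIX chains all four
# (index creation, full-dataset analysis, per-percentage analysis, summary).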
def main(argv):
"""
Main function which shows the usage, retrieves the command line parameters and invokes the required functions to do
the expected job.
:param argv: (dictionary) options and values specified in the command line
"""
print('Preparing for balanced downsampler indexer by factor')
gdal.UseExceptions()
dataset_folder = None
storage_folder = None
tactic = TACTIC_DOWNSAMPLE
operation = OPERATION_MIX
beginning = 5
ending = 100
jump = 5
iterations = 10
try:
        opts, args = getopt.getopt(argv, "hs:d:t:cafmob:e:j:i:",
                                   ["dataset_folder=", "storage_folder=", "tactic=", "create", "analyze",
                                    "full_analyze", "mix", "summarize", "beginning=", "ending=", "jump=", "iterations="])
except getopt.GetoptError:
        print(
            'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} {-c|-a|-f|-m|-o} -b <beginning_percentage> -e <ending_percentage> -j <jump_between_samples> -i <number_of_iterations>')
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
            print(
                'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} {-c|-a|-f|-m|-o} -b <beginning_percentage> -e <ending_percentage> -j <jump_between_samples> -i <number_of_iterations>')
sys.exit()
elif opt in ["-s", "--dataset_folder"]:
dataset_folder = arg
elif opt in ["-d", "--storage_folder"]:
storage_folder = arg
elif opt in ["-t", "--tactic"]:
if arg == 'upsample':
tactic = TACTIC_UPSAMPLE
elif arg == 'downsample':
tactic = TACTIC_DOWNSAMPLE
else:
tactic = TACTIC_NONE
elif opt in ["-c", "--create"]:
operation = OPERATION_CREATE
elif opt in ["-a", "--analyze"]:
operation = OPERATION_ANALYZE
elif opt in ["-f", "--full_analyze"]:
operation = OPERATION_FULLANALYZE
elif opt in ["-m", "--mix"]:
operation = OPERATION_MIX
elif opt in ["-o", "--summarize"]:
operation = OPERATION_SUMMARIZE
elif opt in ["-b", "--beginning"]:
beginning = int(arg)
elif opt in ["-e", "--ending"]:
ending = int(arg)
elif opt in ["-j", "--jump"]:
jump = int(arg)
elif opt in ["-i", "--iterations"]:
iterations = int(arg)
print('Working with dataset folder %s' % dataset_folder)
if operation == OPERATION_CREATE or operation == OPERATION_MIX:
indexes_creator(dataset_folder, tactic, storage_folder, beginning, ending, jump, iterations)
if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:
full_dataset_analyzer(dataset_folder, storage_folder, tactic)
if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:
dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump)
if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:
analysis_summarizer(storage_folder, beginning, ending, jump)
sys.exit()
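# Example invocations (hypothetical paths), mirroring the usage string above:
#   python balanced_factor_indexer.py -s ./dataset -d ./storage -t downsample -c -b 5 -e 100 -j 5 -i 10
#   python balanced_factor_indexer.py -s ./dataset -d ./storage -o -b 5 -e 100 -j 5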
def indexes_creator(dataset_folder, tactic, storage_folder, beginning, ending, jump, iterations):
sample_rasters_folders = [f for f in listdir(dataset_folder) if not isfile(join(dataset_folder, f))]
sample_rasters_folders.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
print('Folders to work with: ', sample_rasters_folders)
print('Checking number of indexes...')
cnt_idx_0 = 0
cnt_idx_1 = 0
for i, pck in enumerate(sample_rasters_folders):
path_to_pck = join(dataset_folder, pck, 'idxs.npz')
pck_data_idx_0 = pck_data_idx_1 = None
item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')
with np.load(path_to_pck) as df:
pck_data_idx_0, pck_data_idx_1 = item_getter(df)
cnt_idx_0 += pck_data_idx_0.shape[0]
cnt_idx_1 += pck_data_idx_1.shape[0]
forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True
class_total = 0
if tactic == TACTIC_UPSAMPLE:
if not forest_dominance:
class_total = cnt_idx_0
else:
class_total = cnt_idx_1
else:
if not forest_dominance:
class_total = cnt_idx_1
else:
class_total = cnt_idx_0
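    # class_total is the per-class target pool size: upsampling targets the majority
    # class count, downsampling targets the minority class count.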
# Retrieving all indexes from the different zones and putting them in memory
    # One row per sampled pixel: (zone, x, y); sized exactly so no uninitialized row is left over
    bigdata_idx_0 = np.empty(shape=(cnt_idx_0, 3), dtype=np.uint16)
    bigdata_idx_1 = np.empty(shape=(cnt_idx_1, 3), dtype=np.uint16)
print('Number of indexes for No Forest: %s' % (str(len(bigdata_idx_0))))
print('Number of indexes for Forest: %s' % (str(len(bigdata_idx_1))))
print('Copying and appending index values...')
current_0_idx = 0
current_1_idx = 0
for i, pck in enumerate(sample_rasters_folders):
path_to_pck = join(dataset_folder, pck, 'idxs.npz')
pck_bigdata_idx_0 = pck_bigdata_idx_1 = None
item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')
with np.load(path_to_pck) as df:
pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)
bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:] = pck_bigdata_idx_0
bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:] = pck_bigdata_idx_1
bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0] = i
bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0] = i
current_0_idx += len(pck_bigdata_idx_0)
current_1_idx += len(pck_bigdata_idx_1)
# Now we go through each percentage sampling
for percentage in range(beginning, ending, jump):
# Calculating sampling amount and determining if upsampling is needed
upsample_required = False
upsample_amount = 0
class_percentage = None
if tactic == TACTIC_UPSAMPLE:
class_percentage = int(class_total * percentage / 100.0)
if not forest_dominance:
if class_percentage > cnt_idx_1:
upsample_required = True
upsample_amount = class_percentage - cnt_idx_1
else:
if class_percentage > cnt_idx_0:
upsample_required = True
upsample_amount = class_percentage - cnt_idx_0
else:
class_percentage = int(class_total * percentage / 100.0)
        # zero-padded so the endswith("{:02d}p".format(...)) lookups in the analyzers match
        folder_suffix = (TACTIC_UPSAMPLE if tactic == TACTIC_UPSAMPLE else
                        TACTIC_DOWNSAMPLE) + '-' + '{:02d}p'.format(int(percentage))
        analysis_path = join(storage_folder, 'train-balanced-' + folder_suffix)
if not exists(analysis_path):
makedirs(analysis_path)
print('Performing initial shuffle of the full datasets')
print('Shuffling No Forest indexes...')
np.random.shuffle(bigdata_idx_0)
print('Shuffling Forest indexes...')
np.random.shuffle(bigdata_idx_1)
p_bigdata_idx_0 = bigdata_idx_0.copy()
p_bigdata_idx_1 = bigdata_idx_1.copy()
if upsample_required:
if not forest_dominance:
if upsample_amount / cnt_idx_1 > 1:
repetitions = int(upsample_amount / cnt_idx_1)
print('Upsampling Forest indexes %s times' % (str(repetitions)))
if repetitions > 0:
p_bigdata_idx_1 = p_bigdata_idx_1.repeat(repetitions, axis=0)
left_to_complete = upsample_amount % cnt_idx_1
if left_to_complete > 0:
p_bigdata_idx_1 = np.append(p_bigdata_idx_1, p_bigdata_idx_1[:left_to_complete], axis=0)
else:
if upsample_amount / cnt_idx_0 > 1:
repetitions = int(upsample_amount / cnt_idx_0)
print('Upsampling No Forest indexes %s times' % (str(repetitions)))
if repetitions > 0:
p_bigdata_idx_0 = p_bigdata_idx_0.repeat(repetitions, axis=0)
left_to_complete = upsample_amount % cnt_idx_0
if left_to_complete > 0:
p_bigdata_idx_0 = np.append(p_bigdata_idx_0, p_bigdata_idx_0[:left_to_complete], axis=0)
# For each iteration we shuffle, upsample if required and retrieve a percentage of the indexes
for i in range(iterations):
print('Performing shuffle before collecting iteration %d' % i)
print('Shuffling No Forest indexes...')
np.random.shuffle(p_bigdata_idx_0)
print('Shuffling Forest indexes...')
np.random.shuffle(p_bigdata_idx_1)
final_idx_0 = p_bigdata_idx_0[:class_percentage]
final_idx_1 = p_bigdata_idx_1[:class_percentage]
analysis_idx_path = join(analysis_path,
"{:02d}_{:02d}_{:}_samples_factor_idx.npz".format(percentage, i, tactic))
print('Storing data: ' + analysis_idx_path)
np.savez_compressed(analysis_idx_path, bigdata_idx_0=final_idx_0, bigdata_idx_1=final_idx_1)
print('Done!')
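# Sketch of reloading one stored sample-index file (the exact path is illustrative;
# it follows the folder/file naming pattern produced above):
#   with np.load('train-balanced-downsample-05p/05_00_downsample_samples_factor_idx.npz') as df:
#       idx_0, idx_1 = df['bigdata_idx_0'], df['bigdata_idx_1']  # each row is (zone, x, y)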
def dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump):
print('Retrieving datasets...')
rasters_folders = [f for f in listdir(dataset_folder) if not isfile(join(dataset_folder, f))]
rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())
bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X,
RasterParams.SRTM_MAX_Y), dtype=np.float32)
bigdata_gt = np.zeros(shape=(len(rasters_folders), RasterParams.FNF_MAX_X, RasterParams.FNF_MAX_Y), dtype=np.uint8)
for i, pck in enumerate(rasters_folders):
path_to_pck = join(dataset_folder, pck, 'dataset.npz')
print('Loading dataset folder ', pck)
pck_bigdata = None
item_getter = itemgetter('bigdata')
with np.load(path_to_pck) as df:
pck_bigdata = item_getter(df)
bigdata[i] = pck_bigdata
pck_bigdata_gt = None
item_getter = itemgetter('bigdata_gt')
with np.load(path_to_pck) as df:
pck_bigdata_gt = item_getter(df)
bigdata_gt[i] = pck_bigdata_gt
del pck_bigdata
del pck_bigdata_gt
gc.collect()
values_0 = values_1 = edges_0 = edges_1 = percentages_0 = percentages_1 = None
analysis_cntr_path = join(storage_folder, "full_histogram_info.npz")
item_getter = itemgetter('values_0', 'values_1', 'edges_0', 'edges_1', 'lower_0', 'lower_1', 'upper_0', 'upper_1',
'lower_outliers_0', 'lower_outliers_1', 'upper_outliers_0', 'upper_outliers_1',
'percentages_0', 'percentages_1')
with np.load(analysis_cntr_path) as df:
values_0, values_1, edges_0, edges_1, lower_0, lower_1, upper_0, upper_1, lower_outliers_0, lower_outliers_1, upper_outliers_0, upper_outliers_1, percentages_0, percentages_1 = item_getter(
df)
    print('Processing percentage sampled index files...\n')
'''partition_range = 2.0 / partitions
bigdata = np.divide(np.add(bigdata, 1.0), partition_range)
gc.collect()
bigdata = bigdata.astype(np.uint32)'''
for percentage in range(beginning, ending, jump):
print('Starting with percentage %d' % percentage)
percentage_idxs_folder = [d for d in listdir(storage_folder) if
not isfile(join(storage_folder, d)) and str(d).endswith("{:02d}p".format(percentage))]
if len(percentage_idxs_folder) != 0:
percentage_idxs_files = [f for f in listdir(join(storage_folder, percentage_idxs_folder[0])) if
isfile(join(storage_folder, percentage_idxs_folder[0], f)) and str(f).endswith(
'factor_idx.npz')]
percentage_idxs_files = natsorted(percentage_idxs_files, key=lambda y: y.lower())
t_values_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_values_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_edges_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_edges_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_lower_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float32)
t_lower_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float32)
t_upper_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float32)
t_upper_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float32)
t_lower_outliers_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_lower_outliers_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_upper_outliers_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_upper_outliers_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_percentages_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_percentages_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_rel_err_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_rel_err_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=object)
t_err_mean_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float64)
t_err_mean_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float64)
t_err_median_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float64)
t_err_median_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),
dtype=np.float64)
for i, idx_file in enumerate(percentage_idxs_files):
path_to_idx = join(storage_folder, percentage_idxs_folder[0], idx_file)
print('Processing idx file %s' % path_to_idx)
iter_bigdata_idx_0 = iter_bigdata_idx_1 = None
item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')
with np.load(path_to_idx) as df:
iter_bigdata_idx_0, iter_bigdata_idx_1 = item_getter(df)
redises = []
threads = list()
band_analyzers = []
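                # One analyzer thread per band, each reporting progress through its own
                # redis db; the polling loop below waits on each thread's 'status' key.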
for band in range(DatasetConfig.DATASET_LST_BANDS_USED):
redis_db = redis.Redis(db=band)
redis_db.delete('status')
redises.append(redis_db)
band_analyzer = BandAnalyzerThread(band, redis_db, bigdata, iter_bigdata_idx_0, iter_bigdata_idx_1,
band, join(storage_folder, percentage_idxs_folder[0]),
edges_0=edges_0, values_0=values_0, lower_0=lower_0,
upper_0=upper_0, lower_outliers_0=lower_outliers_0,
upper_outliers_0=upper_outliers_0, percentages_0=percentages_0,
edges_1=edges_1, values_1=values_1, lower_1=lower_1,
upper_1=upper_1, lower_outliers_1=lower_outliers_1,
upper_outliers_1=upper_outliers_1, percentages_1=percentages_1)
band_analyzers.append(band_analyzer)
t = band_analyzer
threads.append(t)
t.start()
all_thread_processed = False
thrds_processed = [False for t_i in range(len(threads))]
while not all_thread_processed:
# progress_bar(redises, stdscr)
                for thrd in range(len(threads)):
                    status = redises[thrd].get('status')
                    if status is not None and status.decode('utf-8') == SampleSelectorThreadStatus.STATUS_DONE:
if not thrds_processed[thrd]:
analysis_band_path = join(storage_folder, percentage_idxs_folder[0],
"band_{:02d}_cls_{:02d}_histogram_info.npz".format(thrd, 0))
item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper',
'h_lower_outliers', 'h_upper_outliers', 'h_percentages')
with np.load(analysis_band_path) as df:
t_values_0[i, thrd], t_edges_0[i, thrd], t_lower_0[i, thrd], t_upper_0[i, thrd], \
t_lower_outliers_0[i, thrd], t_upper_outliers_0[i, thrd], t_percentages_0[
i, thrd] = item_getter(df)
execution = subprocess.run(['rm', analysis_band_path])
analysis_band_path = join(storage_folder, percentage_idxs_folder[0],
"band_{:02d}_cls_{:02d}_histogram_err.npz".format(thrd, 0))
item_getter = itemgetter('rel_err', 'err_mean', 'err_median')
with np.load(analysis_band_path) as df:
t_rel_err_0[i, thrd], t_err_mean_0[i, thrd], t_err_median_0[i, thrd] = item_getter(
df)
execution = subprocess.run(['rm', analysis_band_path])
analysis_band_path = join(storage_folder, percentage_idxs_folder[0],
"band_{:02d}_cls_{:02d}_histogram_info.npz".format(thrd, 1))
item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper',
'h_lower_outliers', 'h_upper_outliers', 'h_percentages')
with np.load(analysis_band_path) as df:
t_values_1[i, thrd], t_edges_1[i, thrd], t_lower_1[i, thrd], t_upper_1[i, thrd], \
t_lower_outliers_1[i, thrd], t_upper_outliers_1[i, thrd], t_percentages_1[
i, thrd] = item_getter(df)
execution = subprocess.run(['rm', analysis_band_path])
analysis_band_path = join(storage_folder, percentage_idxs_folder[0],
"band_{:02d}_cls_{:02d}_histogram_err.npz".format(thrd, 1))
item_getter = itemgetter('rel_err', 'err_mean', 'err_median')
with np.load(analysis_band_path) as df:
t_rel_err_1[i, thrd], t_err_mean_1[i, thrd], t_err_median_1[i, thrd] = item_getter(
df)
execution = subprocess.run(['rm', analysis_band_path])
thrds_processed[thrd] = True
all_thread_processed = True
for elem in thrds_processed:
if not elem:
all_thread_processed = False
if not all_thread_processed:
time.sleep(1)
analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],
"{:02d}_histogram_info.npz".format(percentage))
print('Storing data: ' + analysis_cntr_path)
np.savez_compressed(analysis_cntr_path, values_0=t_values_0, values_1=t_values_1, edges_0=t_edges_0,
edges_1=t_edges_1, lower_0=t_lower_0, lower_1=t_lower_1, upper_0=t_upper_0,
upper_1=t_upper_1, lower_outliers_0=t_lower_outliers_0,
lower_outliers_1=t_lower_outliers_1, upper_outliers_0=t_upper_outliers_0,
upper_outliers_1=t_upper_outliers_1, percentages_0=t_percentages_0,
percentages_1=t_percentages_1)
analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],
"{:02d}_histogram_err.npz".format(percentage))
print('Storing data: ' + analysis_cntr_path)
np.savez_compressed(analysis_cntr_path, rel_err_0=t_rel_err_0, rel_err_1=t_rel_err_1,
err_mean_0=t_err_mean_0, err_mean_1=t_err_mean_1, err_median_0=t_err_median_0,
err_median_1=t_err_median_1)
print('Done!')
def full_dataset_analyzer(dataset_folder, storage_folder, tactic):
print('Retrieving datasets...')
rasters_folders = [f for f in listdir(dataset_folder) if not isfile(join(dataset_folder, f))]
rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())
bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X,
RasterParams.SRTM_MAX_Y), dtype=np.float32)
for i, pck in enumerate(rasters_folders):
path_to_pck = join(dataset_folder, pck, 'dataset.npz')
print('Loading dataset folder ', pck)
pck_bigdata = None
item_getter = itemgetter('bigdata')
with np.load(path_to_pck) as df:
pck_bigdata = item_getter(df)
bigdata[i] = pck_bigdata
del pck_bigdata
gc.collect()
print('Checking number of indexes...')
cnt_idx_0 = 0
cnt_idx_1 = 0
for i, pck in enumerate(rasters_folders):
path_to_pck = join(dataset_folder, pck, 'idxs.npz')
pck_data_idx_0 = pck_data_idx_1 = None
item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')
with np.load(path_to_pck) as df:
pck_data_idx_0, pck_data_idx_1 = item_getter(df)
cnt_idx_0 += pck_data_idx_0.shape[0]
cnt_idx_1 += pck_data_idx_1.shape[0]
forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True
class_total = 0
if tactic == TACTIC_UPSAMPLE:
if not forest_dominance:
class_total = cnt_idx_0
else:
class_total = cnt_idx_1
else:
if not forest_dominance:
class_total = cnt_idx_1
else:
class_total = cnt_idx_0
# Retrieving all indexes from the different zones and putting them in memory
    bigdata_idx_0 = np.empty(shape=(cnt_idx_0, 3), dtype=np.uint16)
    bigdata_idx_1 = np.empty(shape=(cnt_idx_1, 3), dtype=np.uint16)
print('Number of indexes for No Forest: %s' % (str(len(bigdata_idx_0))))
print('Number of indexes for Forest: %s' % (str(len(bigdata_idx_1))))
print('Copying and appending index values...')
current_0_idx = 0
current_1_idx = 0
for i, pck in enumerate(rasters_folders):
path_to_pck = join(dataset_folder, pck, 'idxs.npz')
pck_bigdata_idx_0 = pck_bigdata_idx_1 = None
item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')
with np.load(path_to_pck) as df:
pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)
bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:] = pck_bigdata_idx_0
bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:] = pck_bigdata_idx_1
bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0] = i
bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0] = i
current_0_idx += len(pck_bigdata_idx_0)
current_1_idx += len(pck_bigdata_idx_1)
upsample_required = False
if tactic == TACTIC_UPSAMPLE:
if not forest_dominance:
if class_total > cnt_idx_1:
upsample_required = True
upsample_amount = class_total - cnt_idx_1
else:
if class_total > cnt_idx_0:
upsample_required = True
upsample_amount = class_total - cnt_idx_0
if upsample_required:
if not forest_dominance:
if upsample_amount / cnt_idx_1 > 1:
repetitions = int(upsample_amount / cnt_idx_1)
print('Upsampling Forest indexes %s times' % (str(repetitions)))
if repetitions > 0:
bigdata_idx_1 = bigdata_idx_1.repeat(repetitions, axis=0)
left_to_complete = upsample_amount % cnt_idx_1
c_bigdata_idx_1 = bigdata_idx_1.copy()
np.random.shuffle(c_bigdata_idx_1)
if left_to_complete > 0:
bigdata_idx_1 = np.append(bigdata_idx_1, c_bigdata_idx_1[:left_to_complete], axis=0)
else:
if upsample_amount / cnt_idx_0 > 1:
repetitions = int(upsample_amount / cnt_idx_0)
print('Upsampling No Forest indexes %s times' % (str(repetitions)))
if repetitions > 0:
bigdata_idx_0 = bigdata_idx_0.repeat(repetitions, axis=0)
left_to_complete = upsample_amount % cnt_idx_0
c_bigdata_idx_0 = bigdata_idx_0.copy()
np.random.shuffle(c_bigdata_idx_0)
if left_to_complete > 0:
bigdata_idx_0 = np.append(bigdata_idx_0, c_bigdata_idx_0[:left_to_complete], axis=0)
analysis_idx_path = join(storage_folder,
"full_{:}_samples_idx.npz".format(tactic))
print('Storing data: ' + analysis_idx_path)
np.savez_compressed(analysis_idx_path, bigdata_idx_0=bigdata_idx_0, bigdata_idx_1=bigdata_idx_1)
    print('Processing full dataset distribution sampled index files...\n')
'''partition_range = 2.0 / partitions
bigdata = np.divide(np.add(bigdata, 1.0), partition_range)
gc.collect()
bigdata = bigdata.astype(np.uint32)'''
values_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
values_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
edges_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
edges_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
lower_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)
lower_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)
upper_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)
upper_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)
lower_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
lower_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
upper_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
upper_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
percentages_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
percentages_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)
redises = []
threads = list()
band_analyzers = []
for band in range(DatasetConfig.DATASET_LST_BANDS_USED):
redis_db = redis.Redis(db=band)
redis_db.delete('status')
redises.append(redis_db)
band_analyzer = BandAnalyzerThread(band, redis_db, bigdata, bigdata_idx_0, bigdata_idx_1, band, storage_folder)
band_analyzers.append(band_analyzer)
t = band_analyzer
threads.append(t)
t.start()
all_thread_processed = False
thrds_processed = [False for t_i in range(len(threads))]
while not all_thread_processed:
# progress_bar(redises, stdscr)
for thrd in range(len(threads)):
            status = redises[thrd].get('status')
            if status is not None and status.decode('utf-8') == SampleSelectorThreadStatus.STATUS_DONE:
if not thrds_processed[thrd]:
analysis_band_path = join(storage_folder,
"band_{:02d}_cls_{:02d}_histogram_info.npz".format(thrd, 0))
item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper', 'h_lower_outliers',
'h_upper_outliers', 'h_percentages')
with np.load(analysis_band_path) as df:
values_0[thrd], edges_0[thrd], lower_0[thrd], upper_0[thrd], lower_outliers_0[thrd], \
upper_outliers_0[thrd], percentages_0[thrd] = item_getter(df)
execution = subprocess.run(['rm', analysis_band_path])
analysis_band_path = join(storage_folder,
"band_{:02d}_cls_{:02d}_histogram_info.npz".format(thrd, 1))
item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper', 'h_lower_outliers',
'h_upper_outliers', 'h_percentages')
with np.load(analysis_band_path) as df:
values_1[thrd], edges_1[thrd], lower_1[thrd], upper_1[thrd], lower_outliers_1[thrd], \
upper_outliers_1[thrd], percentages_1[thrd] = item_getter(df)
execution = subprocess.run(['rm', analysis_band_path])
thrds_processed[thrd] = True
all_thread_processed = True
for elem in thrds_processed:
if not elem:
all_thread_processed = False
if not all_thread_processed:
time.sleep(1)
analysis_cntr_path = join(storage_folder, "full_histogram_info.npz")
print('Storing data: ' + analysis_cntr_path)
np.savez_compressed(analysis_cntr_path, values_0=values_0, values_1=values_1, edges_0=edges_0, edges_1=edges_1,
lower_0=lower_0, lower_1=lower_1, upper_0=upper_0, upper_1=upper_1,
lower_outliers_0=lower_outliers_0, lower_outliers_1=lower_outliers_1,
upper_outliers_0=upper_outliers_0, upper_outliers_1=upper_outliers_1,
percentages_0=percentages_0, percentages_1=percentages_1)
def analysis_summarizer(storage_folder, beginning, ending, jump):
for percentage in range(beginning, ending, jump):
print('Starting with percentage %d' % percentage)
        percentage_idxs_folder = [d for d in listdir(storage_folder) if
                                  not isfile(join(storage_folder, d)) and str(d).endswith("{:02d}p".format(percentage))]
        if len(percentage_idxs_folder) == 0:
            print('No sampled index folder found for percentage %d, skipping' % percentage)
            continue
        analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],
                                  "{:02d}_histogram_err.npz".format(percentage))
print('Retrieving data: ' + analysis_cntr_path)
err_mean_0 = err_mean_1 = err_median_0 = err_median_1 = None
item_getter = itemgetter('err_mean_0', 'err_mean_1', 'err_median_0', 'err_median_1')
with np.load(analysis_cntr_path) as df:
err_mean_0, err_mean_1, err_median_0, err_median_1 = item_getter(df)
'''print('Mean errors of different samples for class 0:\n', err_mean_0)
print('Mean errors of different samples for class 1:\n', err_mean_1)
print('Median errors of different samples for class 0:\n', err_median_0)
print('Median errors of different samples for class 1:\n', err_median_0)'''
mean_err_samples = [np.mean(err_mean_0[:, f]) for f in range(err_mean_0.shape[1])]
print('Mean error for percentage %d%%, class %02d:\n' % (percentage, 0),
np.array2string(np.array(mean_err_samples), separator='%; ',
formatter={'float_kind': lambda x: "%.03f" % x}, max_line_width=sys.maxsize).strip(
'[').replace(']', '%'))
mean_err_samples = [np.mean(err_mean_1[:, f]) for f in range(err_mean_1.shape[1])]
print('Mean error for percentage %d%%, class %02d:\n' % (percentage, 1),
np.array2string(np.array(mean_err_samples), separator='%; ',
formatter={'float_kind': lambda x: "%.03f" % x}, max_line_width=sys.maxsize).strip(
'[').replace(']', '%'))
median_err_samples = [np.median(err_median_0[:, f]) for f in range(err_median_0.shape[1])]
print('Median error for percentage %d%%, class %02d:\n' % (percentage, 0),
np.array2string(np.array(median_err_samples), separator='%; ',
formatter={'float_kind': lambda x: "%.03f" % x}, max_line_width=sys.maxsize).strip(
'[').replace(']', '%'))
median_err_samples = [np.median(err_median_1[:, f]) for f in range(err_median_1.shape[1])]
print('Median error for percentage %d%%, class %02d:\n' % (percentage, 1),
np.array2string(np.array(median_err_samples), separator='%; ',
formatter={'float_kind': lambda x: "%.03f" % x}, max_line_width=sys.maxsize).strip(
'[').replace(']', '%'), '\n\n')
if __name__ == '__main__':
    main(sys.argv[1:])
|
normal
|
{
"blob_id": "2b82d66803ae0a0b03204318975d3c122f34f0cf",
"index": 7003,
"step-1": "<mask token>\n\n\ndef main(argv):\n \"\"\"\n Main function which shows the usage, retrieves the command line parameters and invokes the required functions to do\n the expected job.\n\n :param argv: (dictionary) options and values specified in the command line\n \"\"\"\n print('Preparing for balanced downsampler indexer by factor')\n gdal.UseExceptions()\n dataset_folder = None\n storage_folder = None\n tactic = TACTIC_DOWNSAMPLE\n operation = OPERATION_MIX\n beginning = 5\n ending = 100\n jump = 5\n iterations = 10\n try:\n opts, args = getopt.getopt(argv, 'hs:d:t:cafmob:e:j:i:', [\n 'dataset_folder=', 'storage_folder=', 'tactic=', 'create',\n 'analyze', 'full_analyze', 'mix', 'out', 'begin=', 'end=',\n 'jump=', 'iterations='])\n except getopt.GetoptError:\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit()\n elif opt in ['-s', '--dataset_folder']:\n dataset_folder = arg\n elif opt in ['-d', '--storage_folder']:\n storage_folder = arg\n elif opt in ['-t', '--tactic']:\n if arg == 'upsample':\n tactic = TACTIC_UPSAMPLE\n elif arg == 'downsample':\n tactic = TACTIC_DOWNSAMPLE\n else:\n tactic = TACTIC_NONE\n elif opt in ['-c', '--create']:\n operation = OPERATION_CREATE\n elif opt in ['-a', '--analyze']:\n operation = OPERATION_ANALYZE\n elif opt in ['-f', '--full_analyze']:\n operation = OPERATION_FULLANALYZE\n elif opt in ['-m', '--mix']:\n operation = OPERATION_MIX\n elif opt in ['-o', '--summarize']:\n operation = OPERATION_SUMMARIZE\n elif opt in ['-b', '--beginning']:\n beginning = int(arg)\n elif opt in ['-e', '--ending']:\n ending = int(arg)\n elif opt in ['-j', '--jump']:\n jump = int(arg)\n elif opt in ['-i', '--iterations']:\n iterations = int(arg)\n print('Working with dataset folder %s' % dataset_folder)\n if operation == OPERATION_CREATE or operation == OPERATION_MIX:\n indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations)\n if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:\n full_dataset_analyzer(dataset_folder, storage_folder, tactic)\n if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:\n dataset_analyzer(dataset_folder, storage_folder, beginning, ending,\n jump)\n if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:\n analysis_summarizer(storage_folder, beginning, ending, jump)\n sys.exit()\n\n\ndef indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations):\n sample_rasters_folders = [f for f in listdir(dataset_folder) if not\n isfile(join(dataset_folder, f))]\n sample_rasters_folders.sort(key=lambda f: int(''.join(filter(str.\n isdigit, f))))\n print('Folders to work with: ', sample_rasters_folders)\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 
+= pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n for percentage in range(beginning, ending, jump):\n upsample_required = False\n upsample_amount = 0\n class_percentage = None\n if tactic == TACTIC_UPSAMPLE:\n class_percentage = int(class_total * percentage / 100.0)\n if not forest_dominance:\n if class_percentage > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_1\n elif class_percentage > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_0\n else:\n class_percentage = int(class_total * percentage / 100.0)\n folder_subfix = (TACTIC_UPSAMPLE if tactic == TACTIC_UPSAMPLE else\n TACTIC_DOWNSAMPLE) + '-' + str(int(percentage)) + 'p'\n analysis_path = join(storage_folder, 'train-balanced-' + folder_subfix)\n if not exists(analysis_path):\n makedirs(analysis_path)\n print('Performing initial shuffle of the full datasets')\n print('Shuffling No Forest indexes...')\n np.random.shuffle(bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(bigdata_idx_1)\n p_bigdata_idx_0 = bigdata_idx_0.copy()\n p_bigdata_idx_1 = bigdata_idx_1.copy()\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_1 = p_bigdata_idx_1.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n if left_to_complete > 0:\n p_bigdata_idx_1 = np.append(p_bigdata_idx_1,\n p_bigdata_idx_1[:left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_0 = p_bigdata_idx_0.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n if left_to_complete > 0:\n p_bigdata_idx_0 = np.append(p_bigdata_idx_0,\n p_bigdata_idx_0[:left_to_complete], axis=0)\n for i in range(iterations):\n print('Performing shuffle before collecting iteration %d' % i)\n print('Shuffling No Forest indexes...')\n 
np.random.shuffle(p_bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(p_bigdata_idx_1)\n final_idx_0 = p_bigdata_idx_0[:class_percentage]\n final_idx_1 = p_bigdata_idx_1[:class_percentage]\n analysis_idx_path = join(analysis_path,\n '{:02d}_{:02d}_{:}_samples_factor_idx.npz'.format(\n percentage, i, tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=\n final_idx_0, bigdata_idx_1=final_idx_1)\n print('Done!')\n\n\ndef dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n bigdata_gt = np.zeros(shape=(len(rasters_folders), RasterParams.\n FNF_MAX_X, RasterParams.FNF_MAX_Y), dtype=np.uint8)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n pck_bigdata_gt = None\n item_getter = itemgetter('bigdata_gt')\n with np.load(path_to_pck) as df:\n pck_bigdata_gt = item_getter(df)\n bigdata_gt[i] = pck_bigdata_gt\n del pck_bigdata\n del pck_bigdata_gt\n gc.collect()\n (values_0) = (values_1) = (edges_0) = (edges_1) = (percentages_0) = (\n percentages_1) = None\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n item_getter = itemgetter('values_0', 'values_1', 'edges_0', 'edges_1',\n 'lower_0', 'lower_1', 'upper_0', 'upper_1', 'lower_outliers_0',\n 'lower_outliers_1', 'upper_outliers_0', 'upper_outliers_1',\n 'percentages_0', 'percentages_1')\n with np.load(analysis_cntr_path) as df:\n (values_0, values_1, edges_0, edges_1, lower_0, lower_1, upper_0,\n upper_1, lower_outliers_0, lower_outliers_1, upper_outliers_0,\n upper_outliers_1, percentages_0, percentages_1) = item_getter(df)\n print('Procesing percentage sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n format(percentage))]\n if len(percentage_idxs_folder) != 0:\n percentage_idxs_files = [f for f in listdir(join(storage_folder,\n percentage_idxs_folder[0])) if isfile(join(storage_folder,\n percentage_idxs_folder[0], f)) and str(f).endswith(\n 'factor_idx.npz')]\n percentage_idxs_files = natsorted(percentage_idxs_files, key=lambda\n y: y.lower())\n t_values_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_values_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_0 = 
np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_err_mean_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_mean_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n for i, idx_file in enumerate(percentage_idxs_files):\n path_to_idx = join(storage_folder, percentage_idxs_folder[0\n ], idx_file)\n print('Processing idx file %s' % path_to_idx)\n iter_bigdata_idx_0 = iter_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_idx) as df:\n iter_bigdata_idx_0, iter_bigdata_idx_1 = item_getter(df)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db,\n bigdata, iter_bigdata_idx_0, iter_bigdata_idx_1,\n band, join(storage_folder, percentage_idxs_folder[0\n ]), edges_0=edges_0, values_0=values_0, lower_0=\n lower_0, upper_0=upper_0, lower_outliers_0=\n lower_outliers_0, upper_outliers_0=upper_outliers_0,\n percentages_0=percentages_0, edges_1=edges_1,\n values_1=values_1, lower_1=lower_1, upper_1=upper_1,\n lower_outliers_1=lower_outliers_1, upper_outliers_1\n =upper_outliers_1, percentages_1=percentages_1)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 0))\n item_getter = 
itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_0[i, thrd], t_edges_0[i, thrd\n ], t_lower_0[i, thrd], t_upper_0[i,\n thrd], t_lower_outliers_0[i, thrd\n ], t_upper_outliers_0[i, thrd\n ], t_percentages_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 0))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_0[i, thrd], t_err_mean_0[i, thrd\n ], t_err_median_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 1))\n item_getter = itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_1[i, thrd], t_edges_1[i, thrd\n ], t_lower_1[i, thrd], t_upper_1[i,\n thrd], t_lower_outliers_1[i, thrd\n ], t_upper_outliers_1[i, thrd\n ], t_percentages_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 1))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_1[i, thrd], t_err_mean_1[i, thrd\n ], t_err_median_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_info.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=t_values_0,\n values_1=t_values_1, edges_0=t_edges_0, edges_1=t_edges_1,\n lower_0=t_lower_0, lower_1=t_lower_1, upper_0=t_upper_0,\n upper_1=t_upper_1, lower_outliers_0=t_lower_outliers_0,\n lower_outliers_1=t_lower_outliers_1, upper_outliers_0=\n t_upper_outliers_0, upper_outliers_1=t_upper_outliers_1,\n percentages_0=t_percentages_0, percentages_1=t_percentages_1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_err.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, rel_err_0=t_rel_err_0,\n rel_err_1=t_rel_err_1, err_mean_0=t_err_mean_0, err_mean_1=\n t_err_mean_1, err_median_0=t_err_median_0, err_median_1=\n t_err_median_1)\n print('Done!')\n\n\ndef full_dataset_analyzer(dataset_folder, storage_folder, tactic):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata 
= None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n del pck_bigdata\n gc.collect()\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n upsample_required = False\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n if class_total > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_1\n elif class_total > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_0\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(repetitions))\n if repetitions > 0:\n bigdata_idx_1 = bigdata_idx_1.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n c_bigdata_idx_1 = bigdata_idx_1.copy()\n np.random.shuffle(c_bigdata_idx_1)\n if left_to_complete > 0:\n bigdata_idx_1 = np.append(bigdata_idx_1, c_bigdata_idx_1[:\n left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n bigdata_idx_0 = bigdata_idx_0.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n c_bigdata_idx_0 = bigdata_idx_0.copy()\n np.random.shuffle(c_bigdata_idx_0)\n if left_to_complete > 0:\n bigdata_idx_0 = np.append(bigdata_idx_0, c_bigdata_idx_0[:\n left_to_complete], axis=0)\n analysis_idx_path = join(storage_folder, 'full_{:}_samples_idx.npz'.\n format(tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=bigdata_idx_0,\n bigdata_idx_1=bigdata_idx_1)\n 
print('Procesing full dataset distribution sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n values_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n values_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n edges_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n edges_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n lower_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n lower_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n percentages_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n percentages_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db, bigdata,\n bigdata_idx_0, bigdata_idx_1, band, storage_folder)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 0))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_0[thrd], edges_0[thrd], lower_0[thrd], upper_0[\n thrd], lower_outliers_0[thrd], upper_outliers_0[\n thrd], percentages_0[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 1))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_1[thrd], edges_1[thrd], lower_1[thrd], upper_1[\n thrd], lower_outliers_1[thrd], upper_outliers_1[\n thrd], percentages_1[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=values_0, 
values_1=\n        values_1, edges_0=edges_0, edges_1=edges_1, lower_0=lower_0,\n        lower_1=lower_1, upper_0=upper_0, upper_1=upper_1, lower_outliers_0\n        =lower_outliers_0, lower_outliers_1=lower_outliers_1,\n        upper_outliers_0=upper_outliers_0, upper_outliers_1=\n        upper_outliers_1, percentages_0=percentages_0, percentages_1=\n        percentages_1)\n\n\ndef analysis_summarizer(storage_folder, beginning, ending, jump):\n    for percentage in range(beginning, ending, jump):\n        print('Starting with percentage %d' % percentage)\n        percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n            isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n            format(percentage))]\n        analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n            '{:02d}_histogram_err.npz'.format(percentage))\n        print('Retrieving data: ' + analysis_cntr_path)\n        err_mean_0 = err_mean_1 = err_median_0 = err_median_1 = None\n        item_getter = itemgetter('err_mean_0', 'err_mean_1', 'err_median_0',\n            'err_median_1')\n        with np.load(analysis_cntr_path) as df:\n            err_mean_0, err_mean_1, err_median_0, err_median_1 = item_getter(df\n                )\n        \"\"\"print('Mean errors of different samples for class 0:\n', err_mean_0)\n        print('Mean errors of different samples for class 1:\n', err_mean_1)\n        print('Median errors of different samples for class 0:\n', err_median_0)\n        print('Median errors of different samples for class 1:\n', err_median_1)\"\"\"\n        mean_err_samples = [np.mean(err_mean_0[:, f]) for f in range(\n            err_mean_0.shape[1])]\n        print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n            0), np.array2string(np.array(mean_err_samples), separator='%; ',\n            formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n            =sys.maxsize).strip('[').replace(']', '%'))\n        mean_err_samples = [np.mean(err_mean_1[:, f]) for f in range(\n            err_mean_1.shape[1])]\n        print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n            1), np.array2string(np.array(mean_err_samples), separator='%; ',\n            formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n            =sys.maxsize).strip('[').replace(']', '%'))\n        median_err_samples = [np.median(err_median_0[:, f]) for f in range(\n            err_median_0.shape[1])]\n        print('Median error for percentage %d%%, class %02d:\\n' % (\n            percentage, 0), np.array2string(np.array(median_err_samples),\n            separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n            }, max_line_width=sys.maxsize).strip('[').replace(']', '%'))\n        median_err_samples = [np.median(err_median_1[:, f]) for f in range(\n            err_median_1.shape[1])]\n        print('Median error for percentage %d%%, class %02d:\\n' % (\n            percentage, 1), np.array2string(np.array(median_err_samples),\n            separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n            }, max_line_width=sys.maxsize).strip('[').replace(']', '%'), '\\n\\n'\n            )\n\n\n<mask token>\n",
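Every step in these records balances the two classes with the same repeat-and-top-up upsampling: the minority index array is tiled a whole number of times and the shortfall is drawn from a shuffled copy. A minimal, self-contained sketch of that scheme follows; the helper name `upsample_indices` and its arguments are illustrative, not part of the original script.

import numpy as np

def upsample_indices(idx, target, seed=None):
    # Grow an (N, 3) index array to exactly `target` rows: tile whole
    # copies first, then top up with rows from a shuffled copy, mirroring
    # the repeat/append logic in the records above.
    rng = np.random.default_rng(seed)
    n = len(idx)
    if target <= n:
        return idx
    reps = target // n
    out = np.repeat(idx, reps, axis=0)   # each row repeated reps times
    pool = idx.copy()
    rng.shuffle(pool)                    # shuffle a copy, keep idx intact
    return np.append(out, pool[:target - len(out)], axis=0)

# e.g. growing 7 forest indexes to match 20 no-forest indexes:
# upsample_indices(np.arange(21, dtype=np.uint16).reshape(7, 3), 20)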
"step-2": "<mask token>\nsys.path.append('..')\n<mask token>\n\n\ndef main(argv):\n \"\"\"\n Main function which shows the usage, retrieves the command line parameters and invokes the required functions to do\n the expected job.\n\n :param argv: (dictionary) options and values specified in the command line\n \"\"\"\n print('Preparing for balanced downsampler indexer by factor')\n gdal.UseExceptions()\n dataset_folder = None\n storage_folder = None\n tactic = TACTIC_DOWNSAMPLE\n operation = OPERATION_MIX\n beginning = 5\n ending = 100\n jump = 5\n iterations = 10\n try:\n opts, args = getopt.getopt(argv, 'hs:d:t:cafmob:e:j:i:', [\n 'dataset_folder=', 'storage_folder=', 'tactic=', 'create',\n 'analyze', 'full_analyze', 'mix', 'out', 'begin=', 'end=',\n 'jump=', 'iterations='])\n except getopt.GetoptError:\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit()\n elif opt in ['-s', '--dataset_folder']:\n dataset_folder = arg\n elif opt in ['-d', '--storage_folder']:\n storage_folder = arg\n elif opt in ['-t', '--tactic']:\n if arg == 'upsample':\n tactic = TACTIC_UPSAMPLE\n elif arg == 'downsample':\n tactic = TACTIC_DOWNSAMPLE\n else:\n tactic = TACTIC_NONE\n elif opt in ['-c', '--create']:\n operation = OPERATION_CREATE\n elif opt in ['-a', '--analyze']:\n operation = OPERATION_ANALYZE\n elif opt in ['-f', '--full_analyze']:\n operation = OPERATION_FULLANALYZE\n elif opt in ['-m', '--mix']:\n operation = OPERATION_MIX\n elif opt in ['-o', '--summarize']:\n operation = OPERATION_SUMMARIZE\n elif opt in ['-b', '--beginning']:\n beginning = int(arg)\n elif opt in ['-e', '--ending']:\n ending = int(arg)\n elif opt in ['-j', '--jump']:\n jump = int(arg)\n elif opt in ['-i', '--iterations']:\n iterations = int(arg)\n print('Working with dataset folder %s' % dataset_folder)\n if operation == OPERATION_CREATE or operation == OPERATION_MIX:\n indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations)\n if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:\n full_dataset_analyzer(dataset_folder, storage_folder, tactic)\n if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:\n dataset_analyzer(dataset_folder, storage_folder, beginning, ending,\n jump)\n if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:\n analysis_summarizer(storage_folder, beginning, ending, jump)\n sys.exit()\n\n\ndef indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations):\n sample_rasters_folders = [f for f in listdir(dataset_folder) if not\n isfile(join(dataset_folder, f))]\n sample_rasters_folders.sort(key=lambda f: int(''.join(filter(str.\n isdigit, f))))\n print('Folders to work with: ', sample_rasters_folders)\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 
+= pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n for percentage in range(beginning, ending, jump):\n upsample_required = False\n upsample_amount = 0\n class_percentage = None\n if tactic == TACTIC_UPSAMPLE:\n class_percentage = int(class_total * percentage / 100.0)\n if not forest_dominance:\n if class_percentage > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_1\n elif class_percentage > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_0\n else:\n class_percentage = int(class_total * percentage / 100.0)\n folder_subfix = (TACTIC_UPSAMPLE if tactic == TACTIC_UPSAMPLE else\n TACTIC_DOWNSAMPLE) + '-' + str(int(percentage)) + 'p'\n analysis_path = join(storage_folder, 'train-balanced-' + folder_subfix)\n if not exists(analysis_path):\n makedirs(analysis_path)\n print('Performing initial shuffle of the full datasets')\n print('Shuffling No Forest indexes...')\n np.random.shuffle(bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(bigdata_idx_1)\n p_bigdata_idx_0 = bigdata_idx_0.copy()\n p_bigdata_idx_1 = bigdata_idx_1.copy()\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_1 = p_bigdata_idx_1.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n if left_to_complete > 0:\n p_bigdata_idx_1 = np.append(p_bigdata_idx_1,\n p_bigdata_idx_1[:left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_0 = p_bigdata_idx_0.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n if left_to_complete > 0:\n p_bigdata_idx_0 = np.append(p_bigdata_idx_0,\n p_bigdata_idx_0[:left_to_complete], axis=0)\n for i in range(iterations):\n print('Performing shuffle before collecting iteration %d' % i)\n print('Shuffling 
No Forest indexes...')\n np.random.shuffle(p_bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(p_bigdata_idx_1)\n final_idx_0 = p_bigdata_idx_0[:class_percentage]\n final_idx_1 = p_bigdata_idx_1[:class_percentage]\n analysis_idx_path = join(analysis_path,\n '{:02d}_{:02d}_{:}_samples_factor_idx.npz'.format(\n percentage, i, tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=\n final_idx_0, bigdata_idx_1=final_idx_1)\n print('Done!')\n\n\ndef dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n bigdata_gt = np.zeros(shape=(len(rasters_folders), RasterParams.\n FNF_MAX_X, RasterParams.FNF_MAX_Y), dtype=np.uint8)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n pck_bigdata_gt = None\n item_getter = itemgetter('bigdata_gt')\n with np.load(path_to_pck) as df:\n pck_bigdata_gt = item_getter(df)\n bigdata_gt[i] = pck_bigdata_gt\n del pck_bigdata\n del pck_bigdata_gt\n gc.collect()\n (values_0) = (values_1) = (edges_0) = (edges_1) = (percentages_0) = (\n percentages_1) = None\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n item_getter = itemgetter('values_0', 'values_1', 'edges_0', 'edges_1',\n 'lower_0', 'lower_1', 'upper_0', 'upper_1', 'lower_outliers_0',\n 'lower_outliers_1', 'upper_outliers_0', 'upper_outliers_1',\n 'percentages_0', 'percentages_1')\n with np.load(analysis_cntr_path) as df:\n (values_0, values_1, edges_0, edges_1, lower_0, lower_1, upper_0,\n upper_1, lower_outliers_0, lower_outliers_1, upper_outliers_0,\n upper_outliers_1, percentages_0, percentages_1) = item_getter(df)\n print('Procesing percentage sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n format(percentage))]\n if len(percentage_idxs_folder) != 0:\n percentage_idxs_files = [f for f in listdir(join(storage_folder,\n percentage_idxs_folder[0])) if isfile(join(storage_folder,\n percentage_idxs_folder[0], f)) and str(f).endswith(\n 'factor_idx.npz')]\n percentage_idxs_files = natsorted(percentage_idxs_files, key=lambda\n y: y.lower())\n t_values_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_values_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_0 = 
np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_err_mean_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_mean_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n for i, idx_file in enumerate(percentage_idxs_files):\n path_to_idx = join(storage_folder, percentage_idxs_folder[0\n ], idx_file)\n print('Processing idx file %s' % path_to_idx)\n iter_bigdata_idx_0 = iter_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_idx) as df:\n iter_bigdata_idx_0, iter_bigdata_idx_1 = item_getter(df)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db,\n bigdata, iter_bigdata_idx_0, iter_bigdata_idx_1,\n band, join(storage_folder, percentage_idxs_folder[0\n ]), edges_0=edges_0, values_0=values_0, lower_0=\n lower_0, upper_0=upper_0, lower_outliers_0=\n lower_outliers_0, upper_outliers_0=upper_outliers_0,\n percentages_0=percentages_0, edges_1=edges_1,\n values_1=values_1, lower_1=lower_1, upper_1=upper_1,\n lower_outliers_1=lower_outliers_1, upper_outliers_1\n =upper_outliers_1, percentages_1=percentages_1)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 0))\n item_getter = 
itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_0[i, thrd], t_edges_0[i, thrd\n ], t_lower_0[i, thrd], t_upper_0[i,\n thrd], t_lower_outliers_0[i, thrd\n ], t_upper_outliers_0[i, thrd\n ], t_percentages_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 0))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_0[i, thrd], t_err_mean_0[i, thrd\n ], t_err_median_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 1))\n item_getter = itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_1[i, thrd], t_edges_1[i, thrd\n ], t_lower_1[i, thrd], t_upper_1[i,\n thrd], t_lower_outliers_1[i, thrd\n ], t_upper_outliers_1[i, thrd\n ], t_percentages_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 1))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_1[i, thrd], t_err_mean_1[i, thrd\n ], t_err_median_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_info.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=t_values_0,\n values_1=t_values_1, edges_0=t_edges_0, edges_1=t_edges_1,\n lower_0=t_lower_0, lower_1=t_lower_1, upper_0=t_upper_0,\n upper_1=t_upper_1, lower_outliers_0=t_lower_outliers_0,\n lower_outliers_1=t_lower_outliers_1, upper_outliers_0=\n t_upper_outliers_0, upper_outliers_1=t_upper_outliers_1,\n percentages_0=t_percentages_0, percentages_1=t_percentages_1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_err.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, rel_err_0=t_rel_err_0,\n rel_err_1=t_rel_err_1, err_mean_0=t_err_mean_0, err_mean_1=\n t_err_mean_1, err_median_0=t_err_median_0, err_median_1=\n t_err_median_1)\n print('Done!')\n\n\ndef full_dataset_analyzer(dataset_folder, storage_folder, tactic):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata 
= None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n del pck_bigdata\n gc.collect()\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n upsample_required = False\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n if class_total > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_1\n elif class_total > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_0\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(repetitions))\n if repetitions > 0:\n bigdata_idx_1 = bigdata_idx_1.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n c_bigdata_idx_1 = bigdata_idx_1.copy()\n np.random.shuffle(c_bigdata_idx_1)\n if left_to_complete > 0:\n bigdata_idx_1 = np.append(bigdata_idx_1, c_bigdata_idx_1[:\n left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n bigdata_idx_0 = bigdata_idx_0.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n c_bigdata_idx_0 = bigdata_idx_0.copy()\n np.random.shuffle(c_bigdata_idx_0)\n if left_to_complete > 0:\n bigdata_idx_0 = np.append(bigdata_idx_0, c_bigdata_idx_0[:\n left_to_complete], axis=0)\n analysis_idx_path = join(storage_folder, 'full_{:}_samples_idx.npz'.\n format(tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=bigdata_idx_0,\n bigdata_idx_1=bigdata_idx_1)\n 
print('Procesing full dataset distribution sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n values_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n values_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n edges_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n edges_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n lower_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n lower_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n percentages_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n percentages_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db, bigdata,\n bigdata_idx_0, bigdata_idx_1, band, storage_folder)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 0))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_0[thrd], edges_0[thrd], lower_0[thrd], upper_0[\n thrd], lower_outliers_0[thrd], upper_outliers_0[\n thrd], percentages_0[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 1))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_1[thrd], edges_1[thrd], lower_1[thrd], upper_1[\n thrd], lower_outliers_1[thrd], upper_outliers_1[\n thrd], percentages_1[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=values_0, 
values_1=\n        values_1, edges_0=edges_0, edges_1=edges_1, lower_0=lower_0,\n        lower_1=lower_1, upper_0=upper_0, upper_1=upper_1, lower_outliers_0\n        =lower_outliers_0, lower_outliers_1=lower_outliers_1,\n        upper_outliers_0=upper_outliers_0, upper_outliers_1=\n        upper_outliers_1, percentages_0=percentages_0, percentages_1=\n        percentages_1)\n\n\ndef analysis_summarizer(storage_folder, beginning, ending, jump):\n    for percentage in range(beginning, ending, jump):\n        print('Starting with percentage %d' % percentage)\n        percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n            isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n            format(percentage))]\n        analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n            '{:02d}_histogram_err.npz'.format(percentage))\n        print('Retrieving data: ' + analysis_cntr_path)\n        err_mean_0 = err_mean_1 = err_median_0 = err_median_1 = None\n        item_getter = itemgetter('err_mean_0', 'err_mean_1', 'err_median_0',\n            'err_median_1')\n        with np.load(analysis_cntr_path) as df:\n            err_mean_0, err_mean_1, err_median_0, err_median_1 = item_getter(df\n                )\n        \"\"\"print('Mean errors of different samples for class 0:\n', err_mean_0)\n        print('Mean errors of different samples for class 1:\n', err_mean_1)\n        print('Median errors of different samples for class 0:\n', err_median_0)\n        print('Median errors of different samples for class 1:\n', err_median_1)\"\"\"\n        mean_err_samples = [np.mean(err_mean_0[:, f]) for f in range(\n            err_mean_0.shape[1])]\n        print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n            0), np.array2string(np.array(mean_err_samples), separator='%; ',\n            formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n            =sys.maxsize).strip('[').replace(']', '%'))\n        mean_err_samples = [np.mean(err_mean_1[:, f]) for f in range(\n            err_mean_1.shape[1])]\n        print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n            1), np.array2string(np.array(mean_err_samples), separator='%; ',\n            formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n            =sys.maxsize).strip('[').replace(']', '%'))\n        median_err_samples = [np.median(err_median_0[:, f]) for f in range(\n            err_median_0.shape[1])]\n        print('Median error for percentage %d%%, class %02d:\\n' % (\n            percentage, 0), np.array2string(np.array(median_err_samples),\n            separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n            }, max_line_width=sys.maxsize).strip('[').replace(']', '%'))\n        median_err_samples = [np.median(err_median_1[:, f]) for f in range(\n            err_median_1.shape[1])]\n        print('Median error for percentage %d%%, class %02d:\\n' % (\n            percentage, 1), np.array2string(np.array(median_err_samples),\n            separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n            }, max_line_width=sys.maxsize).strip('[').replace(']', '%'), '\\n\\n'\n            )\n\n\nmain(sys.argv[1:])\n",
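A recurring I/O idiom in every step is pairing operator.itemgetter with np.load on a compressed .npz archive, so several named arrays are pulled out in a single call while the file handle is scoped by the with block. A minimal sketch of the round trip; the file name and array contents here are made up for illustration.

import numpy as np
from operator import itemgetter

# Write two named arrays, then read them back in one itemgetter call.
np.savez_compressed('idxs_demo.npz',
                    bigdata_idx_0=np.zeros((4, 3), dtype=np.uint16),
                    bigdata_idx_1=np.ones((2, 3), dtype=np.uint16))

item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')
with np.load('idxs_demo.npz') as df:      # NpzFile is a context manager
    idx_0, idx_1 = item_getter(df)        # arrays materialize before close
print(idx_0.shape, idx_1.shape)           # (4, 3) (2, 3)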
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\nTACTIC_UPSAMPLE = 'upsample'\nTACTIC_DOWNSAMPLE = 'downsample'\nTACTIC_NONE = 'none'\nOPERATION_CREATE = 1000\nOPERATION_ANALYZE = 2000\nOPERATION_FULLANALYZE = 2500\nOPERATION_MIX = 3000\nOPERATION_SUMMARIZE = 4000\n\n\ndef main(argv):\n \"\"\"\n Main function which shows the usage, retrieves the command line parameters and invokes the required functions to do\n the expected job.\n\n :param argv: (dictionary) options and values specified in the command line\n \"\"\"\n print('Preparing for balanced downsampler indexer by factor')\n gdal.UseExceptions()\n dataset_folder = None\n storage_folder = None\n tactic = TACTIC_DOWNSAMPLE\n operation = OPERATION_MIX\n beginning = 5\n ending = 100\n jump = 5\n iterations = 10\n try:\n opts, args = getopt.getopt(argv, 'hs:d:t:cafmob:e:j:i:', [\n 'dataset_folder=', 'storage_folder=', 'tactic=', 'create',\n 'analyze', 'full_analyze', 'mix', 'out', 'begin=', 'end=',\n 'jump=', 'iterations='])\n except getopt.GetoptError:\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit()\n elif opt in ['-s', '--dataset_folder']:\n dataset_folder = arg\n elif opt in ['-d', '--storage_folder']:\n storage_folder = arg\n elif opt in ['-t', '--tactic']:\n if arg == 'upsample':\n tactic = TACTIC_UPSAMPLE\n elif arg == 'downsample':\n tactic = TACTIC_DOWNSAMPLE\n else:\n tactic = TACTIC_NONE\n elif opt in ['-c', '--create']:\n operation = OPERATION_CREATE\n elif opt in ['-a', '--analyze']:\n operation = OPERATION_ANALYZE\n elif opt in ['-f', '--full_analyze']:\n operation = OPERATION_FULLANALYZE\n elif opt in ['-m', '--mix']:\n operation = OPERATION_MIX\n elif opt in ['-o', '--summarize']:\n operation = OPERATION_SUMMARIZE\n elif opt in ['-b', '--beginning']:\n beginning = int(arg)\n elif opt in ['-e', '--ending']:\n ending = int(arg)\n elif opt in ['-j', '--jump']:\n jump = int(arg)\n elif opt in ['-i', '--iterations']:\n iterations = int(arg)\n print('Working with dataset folder %s' % dataset_folder)\n if operation == OPERATION_CREATE or operation == OPERATION_MIX:\n indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations)\n if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:\n full_dataset_analyzer(dataset_folder, storage_folder, tactic)\n if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:\n dataset_analyzer(dataset_folder, storage_folder, beginning, ending,\n jump)\n if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:\n analysis_summarizer(storage_folder, beginning, ending, jump)\n sys.exit()\n\n\ndef indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations):\n sample_rasters_folders = [f for f in listdir(dataset_folder) if not\n isfile(join(dataset_folder, f))]\n sample_rasters_folders.sort(key=lambda f: int(''.join(filter(str.\n isdigit, f))))\n print('Folders to work with: ', sample_rasters_folders)\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, 
pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n for percentage in range(beginning, ending, jump):\n upsample_required = False\n upsample_amount = 0\n class_percentage = None\n if tactic == TACTIC_UPSAMPLE:\n class_percentage = int(class_total * percentage / 100.0)\n if not forest_dominance:\n if class_percentage > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_1\n elif class_percentage > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_0\n else:\n class_percentage = int(class_total * percentage / 100.0)\n folder_subfix = (TACTIC_UPSAMPLE if tactic == TACTIC_UPSAMPLE else\n TACTIC_DOWNSAMPLE) + '-' + str(int(percentage)) + 'p'\n analysis_path = join(storage_folder, 'train-balanced-' + folder_subfix)\n if not exists(analysis_path):\n makedirs(analysis_path)\n print('Performing initial shuffle of the full datasets')\n print('Shuffling No Forest indexes...')\n np.random.shuffle(bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(bigdata_idx_1)\n p_bigdata_idx_0 = bigdata_idx_0.copy()\n p_bigdata_idx_1 = bigdata_idx_1.copy()\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_1 = p_bigdata_idx_1.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n if left_to_complete > 0:\n p_bigdata_idx_1 = np.append(p_bigdata_idx_1,\n p_bigdata_idx_1[:left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_0 = p_bigdata_idx_0.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n if 
left_to_complete > 0:\n p_bigdata_idx_0 = np.append(p_bigdata_idx_0,\n p_bigdata_idx_0[:left_to_complete], axis=0)\n for i in range(iterations):\n print('Performing shuffle before collecting iteration %d' % i)\n print('Shuffling No Forest indexes...')\n np.random.shuffle(p_bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(p_bigdata_idx_1)\n final_idx_0 = p_bigdata_idx_0[:class_percentage]\n final_idx_1 = p_bigdata_idx_1[:class_percentage]\n analysis_idx_path = join(analysis_path,\n '{:02d}_{:02d}_{:}_samples_factor_idx.npz'.format(\n percentage, i, tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=\n final_idx_0, bigdata_idx_1=final_idx_1)\n print('Done!')\n\n\ndef dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n bigdata_gt = np.zeros(shape=(len(rasters_folders), RasterParams.\n FNF_MAX_X, RasterParams.FNF_MAX_Y), dtype=np.uint8)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n pck_bigdata_gt = None\n item_getter = itemgetter('bigdata_gt')\n with np.load(path_to_pck) as df:\n pck_bigdata_gt = item_getter(df)\n bigdata_gt[i] = pck_bigdata_gt\n del pck_bigdata\n del pck_bigdata_gt\n gc.collect()\n (values_0) = (values_1) = (edges_0) = (edges_1) = (percentages_0) = (\n percentages_1) = None\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n item_getter = itemgetter('values_0', 'values_1', 'edges_0', 'edges_1',\n 'lower_0', 'lower_1', 'upper_0', 'upper_1', 'lower_outliers_0',\n 'lower_outliers_1', 'upper_outliers_0', 'upper_outliers_1',\n 'percentages_0', 'percentages_1')\n with np.load(analysis_cntr_path) as df:\n (values_0, values_1, edges_0, edges_1, lower_0, lower_1, upper_0,\n upper_1, lower_outliers_0, lower_outliers_1, upper_outliers_0,\n upper_outliers_1, percentages_0, percentages_1) = item_getter(df)\n print('Procesing percentage sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n format(percentage))]\n if len(percentage_idxs_folder) != 0:\n percentage_idxs_files = [f for f in listdir(join(storage_folder,\n percentage_idxs_folder[0])) if isfile(join(storage_folder,\n percentage_idxs_folder[0], f)) and str(f).endswith(\n 'factor_idx.npz')]\n percentage_idxs_files = natsorted(percentage_idxs_files, key=lambda\n y: y.lower())\n t_values_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_values_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_0 = 
np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_err_mean_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_mean_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n for i, idx_file in enumerate(percentage_idxs_files):\n path_to_idx = join(storage_folder, percentage_idxs_folder[0\n ], idx_file)\n print('Processing idx file %s' % path_to_idx)\n iter_bigdata_idx_0 = iter_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_idx) as df:\n iter_bigdata_idx_0, iter_bigdata_idx_1 = item_getter(df)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db,\n bigdata, iter_bigdata_idx_0, iter_bigdata_idx_1,\n band, join(storage_folder, percentage_idxs_folder[0\n ]), edges_0=edges_0, values_0=values_0, lower_0=\n lower_0, upper_0=upper_0, lower_outliers_0=\n lower_outliers_0, upper_outliers_0=upper_outliers_0,\n percentages_0=percentages_0, edges_1=edges_1,\n values_1=values_1, lower_1=lower_1, upper_1=upper_1,\n lower_outliers_1=lower_outliers_1, upper_outliers_1\n =upper_outliers_1, percentages_1=percentages_1)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == 
SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 0))\n item_getter = itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_0[i, thrd], t_edges_0[i, thrd\n ], t_lower_0[i, thrd], t_upper_0[i,\n thrd], t_lower_outliers_0[i, thrd\n ], t_upper_outliers_0[i, thrd\n ], t_percentages_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 0))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_0[i, thrd], t_err_mean_0[i, thrd\n ], t_err_median_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 1))\n item_getter = itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_1[i, thrd], t_edges_1[i, thrd\n ], t_lower_1[i, thrd], t_upper_1[i,\n thrd], t_lower_outliers_1[i, thrd\n ], t_upper_outliers_1[i, thrd\n ], t_percentages_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 1))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_1[i, thrd], t_err_mean_1[i, thrd\n ], t_err_median_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_info.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=t_values_0,\n values_1=t_values_1, edges_0=t_edges_0, edges_1=t_edges_1,\n lower_0=t_lower_0, lower_1=t_lower_1, upper_0=t_upper_0,\n upper_1=t_upper_1, lower_outliers_0=t_lower_outliers_0,\n lower_outliers_1=t_lower_outliers_1, upper_outliers_0=\n t_upper_outliers_0, upper_outliers_1=t_upper_outliers_1,\n percentages_0=t_percentages_0, percentages_1=t_percentages_1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_err.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, rel_err_0=t_rel_err_0,\n rel_err_1=t_rel_err_1, err_mean_0=t_err_mean_0, err_mean_1=\n t_err_mean_1, err_median_0=t_err_median_0, err_median_1=\n t_err_median_1)\n print('Done!')\n\n\ndef full_dataset_analyzer(dataset_folder, storage_folder, tactic):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, 
RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n del pck_bigdata\n gc.collect()\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n upsample_required = False\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n if class_total > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_1\n elif class_total > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_0\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(repetitions))\n if repetitions > 0:\n bigdata_idx_1 = bigdata_idx_1.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n c_bigdata_idx_1 = bigdata_idx_1.copy()\n np.random.shuffle(c_bigdata_idx_1)\n if left_to_complete > 0:\n bigdata_idx_1 = np.append(bigdata_idx_1, c_bigdata_idx_1[:\n left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n bigdata_idx_0 = bigdata_idx_0.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n c_bigdata_idx_0 = bigdata_idx_0.copy()\n np.random.shuffle(c_bigdata_idx_0)\n if left_to_complete > 0:\n bigdata_idx_0 = np.append(bigdata_idx_0, c_bigdata_idx_0[:\n left_to_complete], axis=0)\n 
analysis_idx_path = join(storage_folder, 'full_{:}_samples_idx.npz'.\n format(tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=bigdata_idx_0,\n bigdata_idx_1=bigdata_idx_1)\n print('Procesing full dataset distribution sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n values_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n values_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n edges_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n edges_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n lower_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n lower_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n percentages_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n percentages_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db, bigdata,\n bigdata_idx_0, bigdata_idx_1, band, storage_folder)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 0))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_0[thrd], edges_0[thrd], lower_0[thrd], upper_0[\n thrd], lower_outliers_0[thrd], upper_outliers_0[\n thrd], percentages_0[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 1))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_1[thrd], edges_1[thrd], lower_1[thrd], upper_1[\n thrd], lower_outliers_1[thrd], upper_outliers_1[\n thrd], percentages_1[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = 
False\n        if not all_thread_processed:\n            time.sleep(1)\n    analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n    print('Storing data: ' + analysis_cntr_path)\n    np.savez_compressed(analysis_cntr_path, values_0=values_0, values_1=\n        values_1, edges_0=edges_0, edges_1=edges_1, lower_0=lower_0,\n        lower_1=lower_1, upper_0=upper_0, upper_1=upper_1, lower_outliers_0\n        =lower_outliers_0, lower_outliers_1=lower_outliers_1,\n        upper_outliers_0=upper_outliers_0, upper_outliers_1=\n        upper_outliers_1, percentages_0=percentages_0, percentages_1=\n        percentages_1)\n\n\ndef analysis_summarizer(storage_folder, beginning, ending, jump):\n    for percentage in range(beginning, ending, jump):\n        print('Starting with percentage %d' % percentage)\n        percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n            isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n            format(percentage))]\n        analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n            '{:02d}_histogram_err.npz'.format(percentage))\n        print('Retrieving data: ' + analysis_cntr_path)\n        err_mean_0 = err_mean_1 = err_median_0 = err_median_1 = None\n        item_getter = itemgetter('err_mean_0', 'err_mean_1', 'err_median_0',\n            'err_median_1')\n        with np.load(analysis_cntr_path) as df:\n            err_mean_0, err_mean_1, err_median_0, err_median_1 = item_getter(df\n                )\n        \"\"\"print('Mean errors of different samples for class 0:\n', err_mean_0)\n        print('Mean errors of different samples for class 1:\n', err_mean_1)\n        print('Median errors of different samples for class 0:\n', err_median_0)\n        print('Median errors of different samples for class 1:\n', err_median_1)\"\"\"\n        mean_err_samples = [np.mean(err_mean_0[:, f]) for f in range(\n            err_mean_0.shape[1])]\n        print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n            0), np.array2string(np.array(mean_err_samples), separator='%; ',\n            formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n            =sys.maxsize).strip('[').replace(']', '%'))\n        mean_err_samples = [np.mean(err_mean_1[:, f]) for f in range(\n            err_mean_1.shape[1])]\n        print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n            1), np.array2string(np.array(mean_err_samples), separator='%; ',\n            formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n            =sys.maxsize).strip('[').replace(']', '%'))\n        median_err_samples = [np.median(err_median_0[:, f]) for f in range(\n            err_median_0.shape[1])]\n        print('Median error for percentage %d%%, class %02d:\\n' % (\n            percentage, 0), np.array2string(np.array(median_err_samples),\n            separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n            }, max_line_width=sys.maxsize).strip('[').replace(']', '%'))\n        median_err_samples = [np.median(err_median_1[:, f]) for f in range(\n            err_median_1.shape[1])]\n        print('Median error for percentage %d%%, class %02d:\\n' % (\n            percentage, 1), np.array2string(np.array(median_err_samples),\n            separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n            }, max_line_width=sys.maxsize).strip('[').replace(']', '%'), '\\n\\n'\n            )\n\n\nmain(sys.argv[1:])\n",
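The band analysis in each step fans out one worker per band and polls a per-worker Redis database for a 'status' flag. The sketch below reproduces only that coordination pattern with a toy worker. It assumes a local Redis server on the default port; STATUS_DONE and toy_worker stand in for the project's SampleSelectorThreadStatus.STATUS_DONE and BandAnalyzerThread. Unlike the loop in the records, it guards against get('status') returning None before a worker has reported.

import time
import threading
import redis

STATUS_DONE = 'Done'  # stand-in for SampleSelectorThreadStatus.STATUS_DONE

def toy_worker(db_index):
    # Placeholder for the per-band analysis: do some work, flag completion.
    time.sleep(0.2)
    redis.Redis(db=db_index).set('status', STATUS_DONE)

redises, threads = [], []
for band in range(4):
    r = redis.Redis(db=band)   # one logical Redis db per worker
    r.delete('status')         # clear any stale flag from a previous run
    redises.append(r)
    t = threading.Thread(target=toy_worker, args=(band,))
    threads.append(t)
    t.start()

done = [False] * len(threads)
while not all(done):
    for i, r in enumerate(redises):
        status = r.get('status')          # None until the worker reports
        if status is not None and status.decode('utf-8') == STATUS_DONE:
            done[i] = True
    if not all(done):
        time.sleep(1)                     # same 1 s poll as the records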
"step-4": "import gdal\nimport sys, gc\nsys.path.append('..')\nimport getopt\nimport redis\nimport time\nimport subprocess\nimport numpy as np\nfrom os import listdir, makedirs\nfrom os.path import isfile, join, exists\nfrom operator import itemgetter\nfrom natsort import natsorted\nfrom config import DatasetConfig, RasterParams\nfrom dataset.threads.band_analyzer_thread import BandAnalyzerThread\nfrom dataset.threads.sample_selector_thread_status import SampleSelectorThreadStatus\nTACTIC_UPSAMPLE = 'upsample'\nTACTIC_DOWNSAMPLE = 'downsample'\nTACTIC_NONE = 'none'\nOPERATION_CREATE = 1000\nOPERATION_ANALYZE = 2000\nOPERATION_FULLANALYZE = 2500\nOPERATION_MIX = 3000\nOPERATION_SUMMARIZE = 4000\n\n\ndef main(argv):\n \"\"\"\n Main function which shows the usage, retrieves the command line parameters and invokes the required functions to do\n the expected job.\n\n :param argv: (dictionary) options and values specified in the command line\n \"\"\"\n print('Preparing for balanced downsampler indexer by factor')\n gdal.UseExceptions()\n dataset_folder = None\n storage_folder = None\n tactic = TACTIC_DOWNSAMPLE\n operation = OPERATION_MIX\n beginning = 5\n ending = 100\n jump = 5\n iterations = 10\n try:\n opts, args = getopt.getopt(argv, 'hs:d:t:cafmob:e:j:i:', [\n 'dataset_folder=', 'storage_folder=', 'tactic=', 'create',\n 'analyze', 'full_analyze', 'mix', 'out', 'begin=', 'end=',\n 'jump=', 'iterations='])\n except getopt.GetoptError:\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>'\n )\n sys.exit()\n elif opt in ['-s', '--dataset_folder']:\n dataset_folder = arg\n elif opt in ['-d', '--storage_folder']:\n storage_folder = arg\n elif opt in ['-t', '--tactic']:\n if arg == 'upsample':\n tactic = TACTIC_UPSAMPLE\n elif arg == 'downsample':\n tactic = TACTIC_DOWNSAMPLE\n else:\n tactic = TACTIC_NONE\n elif opt in ['-c', '--create']:\n operation = OPERATION_CREATE\n elif opt in ['-a', '--analyze']:\n operation = OPERATION_ANALYZE\n elif opt in ['-f', '--full_analyze']:\n operation = OPERATION_FULLANALYZE\n elif opt in ['-m', '--mix']:\n operation = OPERATION_MIX\n elif opt in ['-o', '--summarize']:\n operation = OPERATION_SUMMARIZE\n elif opt in ['-b', '--beginning']:\n beginning = int(arg)\n elif opt in ['-e', '--ending']:\n ending = int(arg)\n elif opt in ['-j', '--jump']:\n jump = int(arg)\n elif opt in ['-i', '--iterations']:\n iterations = int(arg)\n print('Working with dataset folder %s' % dataset_folder)\n if operation == OPERATION_CREATE or operation == OPERATION_MIX:\n indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, jump, iterations)\n if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:\n full_dataset_analyzer(dataset_folder, storage_folder, tactic)\n if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:\n dataset_analyzer(dataset_folder, storage_folder, beginning, ending,\n jump)\n if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:\n analysis_summarizer(storage_folder, beginning, ending, jump)\n sys.exit()\n\n\ndef indexes_creator(dataset_folder, tactic, storage_folder, beginning,\n ending, 
jump, iterations):\n sample_rasters_folders = [f for f in listdir(dataset_folder) if not\n isfile(join(dataset_folder, f))]\n sample_rasters_folders.sort(key=lambda f: int(''.join(filter(str.\n isdigit, f))))\n print('Folders to work with: ', sample_rasters_folders)\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n for percentage in range(beginning, ending, jump):\n upsample_required = False\n upsample_amount = 0\n class_percentage = None\n if tactic == TACTIC_UPSAMPLE:\n class_percentage = int(class_total * percentage / 100.0)\n if not forest_dominance:\n if class_percentage > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_1\n elif class_percentage > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_0\n else:\n class_percentage = int(class_total * percentage / 100.0)\n folder_subfix = (TACTIC_UPSAMPLE if tactic == TACTIC_UPSAMPLE else\n TACTIC_DOWNSAMPLE) + '-' + str(int(percentage)) + 'p'\n analysis_path = join(storage_folder, 'train-balanced-' + folder_subfix)\n if not exists(analysis_path):\n makedirs(analysis_path)\n print('Performing initial shuffle of the full datasets')\n print('Shuffling No Forest indexes...')\n np.random.shuffle(bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(bigdata_idx_1)\n p_bigdata_idx_0 = bigdata_idx_0.copy()\n p_bigdata_idx_1 = bigdata_idx_1.copy()\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_1 = p_bigdata_idx_1.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n if 
left_to_complete > 0:\n p_bigdata_idx_1 = np.append(p_bigdata_idx_1,\n p_bigdata_idx_1[:left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n p_bigdata_idx_0 = p_bigdata_idx_0.repeat(repetitions,\n axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n if left_to_complete > 0:\n p_bigdata_idx_0 = np.append(p_bigdata_idx_0,\n p_bigdata_idx_0[:left_to_complete], axis=0)\n for i in range(iterations):\n print('Performing shuffle before collecting iteration %d' % i)\n print('Shuffling No Forest indexes...')\n np.random.shuffle(p_bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(p_bigdata_idx_1)\n final_idx_0 = p_bigdata_idx_0[:class_percentage]\n final_idx_1 = p_bigdata_idx_1[:class_percentage]\n analysis_idx_path = join(analysis_path,\n '{:02d}_{:02d}_{:}_samples_factor_idx.npz'.format(\n percentage, i, tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=\n final_idx_0, bigdata_idx_1=final_idx_1)\n print('Done!')\n\n\ndef dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n bigdata_gt = np.zeros(shape=(len(rasters_folders), RasterParams.\n FNF_MAX_X, RasterParams.FNF_MAX_Y), dtype=np.uint8)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n pck_bigdata_gt = None\n item_getter = itemgetter('bigdata_gt')\n with np.load(path_to_pck) as df:\n pck_bigdata_gt = item_getter(df)\n bigdata_gt[i] = pck_bigdata_gt\n del pck_bigdata\n del pck_bigdata_gt\n gc.collect()\n (values_0) = (values_1) = (edges_0) = (edges_1) = (percentages_0) = (\n percentages_1) = None\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n item_getter = itemgetter('values_0', 'values_1', 'edges_0', 'edges_1',\n 'lower_0', 'lower_1', 'upper_0', 'upper_1', 'lower_outliers_0',\n 'lower_outliers_1', 'upper_outliers_0', 'upper_outliers_1',\n 'percentages_0', 'percentages_1')\n with np.load(analysis_cntr_path) as df:\n (values_0, values_1, edges_0, edges_1, lower_0, lower_1, upper_0,\n upper_1, lower_outliers_0, lower_outliers_1, upper_outliers_0,\n upper_outliers_1, percentages_0, percentages_1) = item_getter(df)\n print('Procesing percentage sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n format(percentage))]\n if len(percentage_idxs_folder) != 0:\n percentage_idxs_files = [f for f in listdir(join(storage_folder,\n percentage_idxs_folder[0])) if 
isfile(join(storage_folder,\n percentage_idxs_folder[0], f)) and str(f).endswith(\n 'factor_idx.npz')]\n percentage_idxs_files = natsorted(percentage_idxs_files, key=lambda\n y: y.lower())\n t_values_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_values_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_edges_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_upper_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float32)\n t_lower_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_lower_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_upper_outliers_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_percentages_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_rel_err_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=object)\n t_err_mean_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_mean_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_0 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n t_err_median_1 = np.zeros(shape=(len(percentage_idxs_files),\n DatasetConfig.DATASET_LST_BANDS_USED), dtype=np.float64)\n for i, idx_file in enumerate(percentage_idxs_files):\n path_to_idx = join(storage_folder, percentage_idxs_folder[0\n ], idx_file)\n print('Processing idx file %s' % path_to_idx)\n iter_bigdata_idx_0 = iter_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_idx) as df:\n iter_bigdata_idx_0, iter_bigdata_idx_1 = item_getter(df)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db,\n bigdata, iter_bigdata_idx_0, iter_bigdata_idx_1,\n band, join(storage_folder, percentage_idxs_folder[0\n ]), edges_0=edges_0, values_0=values_0, lower_0=\n lower_0, upper_0=upper_0, lower_outliers_0=\n lower_outliers_0, upper_outliers_0=upper_outliers_0,\n percentages_0=percentages_0, edges_1=edges_1,\n values_1=values_1, lower_1=lower_1, upper_1=upper_1,\n 
lower_outliers_1=lower_outliers_1, upper_outliers_1\n =upper_outliers_1, percentages_1=percentages_1)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 0))\n item_getter = itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_0[i, thrd], t_edges_0[i, thrd\n ], t_lower_0[i, thrd], t_upper_0[i,\n thrd], t_lower_outliers_0[i, thrd\n ], t_upper_outliers_0[i, thrd\n ], t_percentages_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 0))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_0[i, thrd], t_err_mean_0[i, thrd\n ], t_err_median_0[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'\n .format(thrd, 1))\n item_getter = itemgetter('h_values',\n 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers',\n 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_1[i, thrd], t_edges_1[i, thrd\n ], t_lower_1[i, thrd], t_upper_1[i,\n thrd], t_lower_outliers_1[i, thrd\n ], t_upper_outliers_1[i, thrd\n ], t_percentages_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n analysis_band_path = join(storage_folder,\n percentage_idxs_folder[0],\n 'band_{:02d}_cls_{:02d}_histogram_err.npz'\n .format(thrd, 1))\n item_getter = itemgetter('rel_err',\n 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_1[i, thrd], t_err_mean_1[i, thrd\n ], t_err_median_1[i, thrd\n ] = item_getter(df)\n execution = subprocess.run(['rm',\n analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_info.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=t_values_0,\n values_1=t_values_1, edges_0=t_edges_0, edges_1=t_edges_1,\n lower_0=t_lower_0, lower_1=t_lower_1, upper_0=t_upper_0,\n upper_1=t_upper_1, lower_outliers_0=t_lower_outliers_0,\n lower_outliers_1=t_lower_outliers_1, upper_outliers_0=\n t_upper_outliers_0, upper_outliers_1=t_upper_outliers_1,\n percentages_0=t_percentages_0, percentages_1=t_percentages_1)\n analysis_cntr_path = join(storage_folder,\n percentage_idxs_folder[0], '{:02d}_histogram_err.npz'.\n format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, rel_err_0=t_rel_err_0,\n rel_err_1=t_rel_err_1, err_mean_0=t_err_mean_0, err_mean_1=\n t_err_mean_1, err_median_0=t_err_median_0, 
err_median_1=\n t_err_median_1)\n print('Done!')\n\n\ndef full_dataset_analyzer(dataset_folder, storage_folder, tactic):\n print('Retrieving datasets...')\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(\n join(dataset_folder, f))]\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.\n DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X, RasterParams.\n SRTM_MAX_Y), dtype=np.float32)\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n print('Loading dataset folder ', pck)\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n bigdata[i] = pck_bigdata\n del pck_bigdata\n gc.collect()\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n elif not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n print('Number of indexes for No Forest: %s' % str(len(bigdata_idx_0)))\n print('Number of indexes for Forest: %s' % str(len(bigdata_idx_1)))\n print('Copying and appending index values...')\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:\n ] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:\n ] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0\n ] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0\n ] = i\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n upsample_required = False\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n if class_total > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_1\n elif class_total > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_0\n if upsample_required:\n if not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % str(repetitions))\n if repetitions > 0:\n bigdata_idx_1 = bigdata_idx_1.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_1\n c_bigdata_idx_1 = bigdata_idx_1.copy()\n np.random.shuffle(c_bigdata_idx_1)\n if left_to_complete > 0:\n bigdata_idx_1 = np.append(bigdata_idx_1, c_bigdata_idx_1[:\n left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / 
cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % str(\n repetitions))\n if repetitions > 0:\n bigdata_idx_0 = bigdata_idx_0.repeat(repetitions, axis=0)\n left_to_complete = upsample_amount % cnt_idx_0\n c_bigdata_idx_0 = bigdata_idx_0.copy()\n np.random.shuffle(c_bigdata_idx_0)\n if left_to_complete > 0:\n bigdata_idx_0 = np.append(bigdata_idx_0, c_bigdata_idx_0[:\n left_to_complete], axis=0)\n analysis_idx_path = join(storage_folder, 'full_{:}_samples_idx.npz'.\n format(tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=bigdata_idx_0,\n bigdata_idx_1=bigdata_idx_1)\n print('Procesing full dataset distribution sampled index files...\\n')\n \"\"\"partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)\"\"\"\n values_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n values_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n edges_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n edges_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =object)\n lower_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n upper_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype\n =np.float32)\n lower_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n lower_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n upper_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED\n ,), dtype=object)\n percentages_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n percentages_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n redises = []\n threads = list()\n band_analyzers = []\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n band_analyzer = BandAnalyzerThread(band, redis_db, bigdata,\n bigdata_idx_0, bigdata_idx_1, band, storage_folder)\n band_analyzers.append(band_analyzer)\n t = band_analyzer\n threads.append(t)\n t.start()\n all_thread_processed = False\n thrds_processed = [(False) for t_i in range(len(threads))]\n while not all_thread_processed:\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8'\n ) == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 0))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_0[thrd], edges_0[thrd], lower_0[thrd], upper_0[\n thrd], lower_outliers_0[thrd], upper_outliers_0[\n thrd], percentages_0[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n analysis_band_path = join(storage_folder,\n 'band_{:02d}_cls_{:02d}_histogram_info.npz'.format(\n thrd, 1))\n item_getter = itemgetter('h_values', 'h_edges',\n 'h_lower', 'h_upper', 'h_lower_outliers',\n 
'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_1[thrd], edges_1[thrd], lower_1[thrd], upper_1[\n thrd], lower_outliers_1[thrd], upper_outliers_1[\n thrd], percentages_1[thrd] = item_getter(df)\n execution = subprocess.run(['rm', analysis_band_path])\n thrds_processed[thrd] = True\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n if not all_thread_processed:\n time.sleep(1)\n analysis_cntr_path = join(storage_folder, 'full_histogram_info.npz')\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=values_0, values_1=\n values_1, edges_0=edges_0, edges_1=edges_1, lower_0=lower_0,\n lower_1=lower_1, upper_0=upper_0, upper_1=upper_1, lower_outliers_0\n =lower_outliers_0, lower_outliers_1=lower_outliers_1,\n upper_outliers_0=upper_outliers_0, upper_outliers_1=\n upper_outliers_1, percentages_0=percentages_0, percentages_1=\n percentages_1)\n\n\ndef analysis_summarizer(storage_folder, beginning, ending, jump):\n for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if not\n isfile(join(storage_folder, d)) and str(d).endswith('{:02d}p'.\n format(percentage))]\n analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n '{:02d}_histogram_err.npz'.format(percentage))\n print('Retrieving data: ' + analysis_cntr_path)\n err_mean_0 = err_mean_1 = err_median_0 = err_median_1 = None\n item_getter = itemgetter('err_mean_0', 'err_mean_1', 'err_median_0',\n 'err_median_1')\n with np.load(analysis_cntr_path) as df:\n err_mean_0, err_mean_1, err_median_0, err_median_1 = item_getter(df\n )\n \"\"\"print('Mean errors of different samples for class 0:\n', err_mean_0)\n print('Mean errors of different samples for class 1:\n', err_mean_1)\n print('Median errors of different samples for class 0:\n', err_median_0)\n print('Median errors of different samples for class 1:\n', err_median_0)\"\"\"\n mean_err_samples = [np.mean(err_mean_0[:, f]) for f in range(\n err_mean_0.shape[1])]\n print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n 0), np.array2string(np.array(mean_err_samples), separator='%; ',\n formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n =sys.maxsize).strip('[').replace(']', '%'))\n mean_err_samples = [np.mean(err_mean_1[:, f]) for f in range(\n err_mean_1.shape[1])]\n print('Mean error for percentage %d%%, class %02d:\\n' % (percentage,\n 1), np.array2string(np.array(mean_err_samples), separator='%; ',\n formatter={'float_kind': lambda x: '%.03f' % x}, max_line_width\n =sys.maxsize).strip('[').replace(']', '%'))\n median_err_samples = [np.median(err_median_0[:, f]) for f in range(\n err_median_0.shape[1])]\n print('Median error for percentage %d%%, class %02d:\\n' % (\n percentage, 0), np.array2string(np.array(median_err_samples),\n separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n }, max_line_width=sys.maxsize).strip('[').replace(']', '%'))\n median_err_samples = [np.median(err_median_1[:, f]) for f in range(\n err_median_1.shape[1])]\n print('Median error for percentage %d%%, class %02d:\\n' % (\n percentage, 1), np.array2string(np.array(median_err_samples),\n separator='%; ', formatter={'float_kind': lambda x: '%.03f' % x\n }, max_line_width=sys.maxsize).strip('[').replace(']', '%'), '\\n\\n'\n )\n\n\nmain(sys.argv[1:])\n",
"step-5": "import gdal\nimport sys, gc\n\nsys.path.append(\"..\")\nimport getopt\nimport redis\nimport time\nimport subprocess\nimport numpy as np\nfrom os import listdir, makedirs\nfrom os.path import isfile, join, exists\nfrom operator import itemgetter\nfrom natsort import natsorted\nfrom config import DatasetConfig, RasterParams\nfrom dataset.threads.band_analyzer_thread import BandAnalyzerThread\nfrom dataset.threads.sample_selector_thread_status import SampleSelectorThreadStatus\n\nTACTIC_UPSAMPLE = 'upsample'\nTACTIC_DOWNSAMPLE = 'downsample'\nTACTIC_NONE = 'none'\n\nOPERATION_CREATE = 1000\nOPERATION_ANALYZE = 2000\nOPERATION_FULLANALYZE = 2500\nOPERATION_MIX = 3000\nOPERATION_SUMMARIZE = 4000\n\ndef main(argv):\n \"\"\"\n Main function which shows the usage, retrieves the command line parameters and invokes the required functions to do\n the expected job.\n\n :param argv: (dictionary) options and values specified in the command line\n \"\"\"\n\n print('Preparing for balanced downsampler indexer by factor')\n gdal.UseExceptions()\n dataset_folder = None\n storage_folder = None\n tactic = TACTIC_DOWNSAMPLE\n operation = OPERATION_MIX\n beginning = 5\n ending = 100\n jump = 5\n iterations = 10\n\n try:\n opts, args = getopt.getopt(argv, \"hs:d:t:cafmob:e:j:i:\",\n [\"dataset_folder=\", \"storage_folder=\", \"tactic=\", \"create\", \"analyze\",\n \"full_analyze\", \"mix\", \"out\", \"begin=\", \"end=\", \"jump=\", \"iterations=\"])\n except getopt.GetoptError:\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == \"-h\":\n print(\n 'balanced_factor_indexer.py -s <dataset_folder> -d <storage_folder> -t {upsample/downsample} -m -b <beginning_percentage> -e <ending_percentage -j <jump_between_samples> -i <number_of_iterations>')\n sys.exit()\n elif opt in [\"-s\", \"--dataset_folder\"]:\n dataset_folder = arg\n elif opt in [\"-d\", \"--storage_folder\"]:\n storage_folder = arg\n elif opt in [\"-t\", \"--tactic\"]:\n if arg == 'upsample':\n tactic = TACTIC_UPSAMPLE\n elif arg == 'downsample':\n tactic = TACTIC_DOWNSAMPLE\n else:\n tactic = TACTIC_NONE\n elif opt in [\"-c\", \"--create\"]:\n operation = OPERATION_CREATE\n elif opt in [\"-a\", \"--analyze\"]:\n operation = OPERATION_ANALYZE\n elif opt in [\"-f\", \"--full_analyze\"]:\n operation = OPERATION_FULLANALYZE\n elif opt in [\"-m\", \"--mix\"]:\n operation = OPERATION_MIX\n elif opt in [\"-o\", \"--summarize\"]:\n operation = OPERATION_SUMMARIZE\n elif opt in [\"-b\", \"--beginning\"]:\n beginning = int(arg)\n elif opt in [\"-e\", \"--ending\"]:\n ending = int(arg)\n elif opt in [\"-j\", \"--jump\"]:\n jump = int(arg)\n elif opt in [\"-i\", \"--iterations\"]:\n iterations = int(arg)\n\n print('Working with dataset folder %s' % dataset_folder)\n\n if operation == OPERATION_CREATE or operation == OPERATION_MIX:\n indexes_creator(dataset_folder, tactic, storage_folder, beginning, ending, jump, iterations)\n if operation == OPERATION_FULLANALYZE or operation == OPERATION_MIX:\n full_dataset_analyzer(dataset_folder, storage_folder, tactic)\n if operation == OPERATION_ANALYZE or operation == OPERATION_MIX:\n dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump)\n if operation == OPERATION_SUMMARIZE or operation == OPERATION_MIX:\n analysis_summarizer(storage_folder, beginning, ending, jump)\n\n sys.exit()\n\n\ndef 
indexes_creator(dataset_folder, tactic, storage_folder, beginning, ending, jump, iterations):\n sample_rasters_folders = [f for f in listdir(dataset_folder) if not isfile(join(dataset_folder, f))]\n\n sample_rasters_folders.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n\n print('Folders to work with: ', sample_rasters_folders)\n\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n else:\n if not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n\n # Retrieving all indexes from the different zones and putting them in memory\n\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n\n print('Number of indexes for No Forest: %s' % (str(len(bigdata_idx_0))))\n print('Number of indexes for Forest: %s' % (str(len(bigdata_idx_1))))\n\n print('Copying and appending index values...')\n\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(sample_rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0] = i\n\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n\n # Now we go through each percentage sampling\n\n for percentage in range(beginning, ending, jump):\n\n # Calculating sampling amount and determining if upsampling is needed\n upsample_required = False\n upsample_amount = 0\n\n class_percentage = None\n if tactic == TACTIC_UPSAMPLE:\n class_percentage = int(class_total * percentage / 100.0)\n if not forest_dominance:\n if class_percentage > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_1\n else:\n if class_percentage > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_percentage - cnt_idx_0\n else:\n class_percentage = int(class_total * percentage / 100.0)\n\n folder_subfix = (TACTIC_UPSAMPLE if tactic == TACTIC_UPSAMPLE else TACTIC_DOWNSAMPLE) + '-' + str(\n int(percentage)) + 'p'\n\n analysis_path = join(storage_folder, 'train-balanced-' + folder_subfix)\n\n if not exists(analysis_path):\n makedirs(analysis_path)\n\n print('Performing initial shuffle of the full datasets')\n\n print('Shuffling No Forest indexes...')\n np.random.shuffle(bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(bigdata_idx_1)\n\n p_bigdata_idx_0 = bigdata_idx_0.copy()\n p_bigdata_idx_1 = bigdata_idx_1.copy()\n\n if upsample_required:\n if 
not forest_dominance:\n if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % (str(repetitions)))\n\n if repetitions > 0:\n p_bigdata_idx_1 = p_bigdata_idx_1.repeat(repetitions, axis=0)\n\n left_to_complete = upsample_amount % cnt_idx_1\n\n if left_to_complete > 0:\n p_bigdata_idx_1 = np.append(p_bigdata_idx_1, p_bigdata_idx_1[:left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % (str(repetitions)))\n\n if repetitions > 0:\n p_bigdata_idx_0 = p_bigdata_idx_0.repeat(repetitions, axis=0)\n\n left_to_complete = upsample_amount % cnt_idx_0\n\n if left_to_complete > 0:\n p_bigdata_idx_0 = np.append(p_bigdata_idx_0, p_bigdata_idx_0[:left_to_complete], axis=0)\n\n # For each iteration we shuffle, upsample if required and retrieve a percentage of the indexes\n for i in range(iterations):\n print('Performing shuffle before collecting iteration %d' % i)\n\n print('Shuffling No Forest indexes...')\n np.random.shuffle(p_bigdata_idx_0)\n print('Shuffling Forest indexes...')\n np.random.shuffle(p_bigdata_idx_1)\n\n final_idx_0 = p_bigdata_idx_0[:class_percentage]\n final_idx_1 = p_bigdata_idx_1[:class_percentage]\n\n analysis_idx_path = join(analysis_path,\n \"{:02d}_{:02d}_{:}_samples_factor_idx.npz\".format(percentage, i, tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=final_idx_0, bigdata_idx_1=final_idx_1)\n\n print('Done!')\n\n\ndef dataset_analyzer(dataset_folder, storage_folder, beginning, ending, jump):\n print('Retrieving datasets...')\n\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(join(dataset_folder, f))]\n\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X,\n RasterParams.SRTM_MAX_Y), dtype=np.float32)\n bigdata_gt = np.zeros(shape=(len(rasters_folders), RasterParams.FNF_MAX_X, RasterParams.FNF_MAX_Y), dtype=np.uint8)\n\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n\n print('Loading dataset folder ', pck)\n\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n\n bigdata[i] = pck_bigdata\n\n pck_bigdata_gt = None\n item_getter = itemgetter('bigdata_gt')\n with np.load(path_to_pck) as df:\n pck_bigdata_gt = item_getter(df)\n\n bigdata_gt[i] = pck_bigdata_gt\n\n del pck_bigdata\n del pck_bigdata_gt\n\n gc.collect()\n\n values_0 = values_1 = edges_0 = edges_1 = percentages_0 = percentages_1 = None\n analysis_cntr_path = join(storage_folder, \"full_histogram_info.npz\")\n item_getter = itemgetter('values_0', 'values_1', 'edges_0', 'edges_1', 'lower_0', 'lower_1', 'upper_0', 'upper_1',\n 'lower_outliers_0', 'lower_outliers_1', 'upper_outliers_0', 'upper_outliers_1',\n 'percentages_0', 'percentages_1')\n with np.load(analysis_cntr_path) as df:\n values_0, values_1, edges_0, edges_1, lower_0, lower_1, upper_0, upper_1, lower_outliers_0, lower_outliers_1, upper_outliers_0, upper_outliers_1, percentages_0, percentages_1 = item_getter(\n df)\n\n print('Procesing percentage sampled index files...\\n')\n\n '''partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)'''\n\n 
for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if\n not isfile(join(storage_folder, d)) and str(d).endswith(\"{:02d}p\".format(percentage))]\n\n if len(percentage_idxs_folder) != 0:\n percentage_idxs_files = [f for f in listdir(join(storage_folder, percentage_idxs_folder[0])) if\n isfile(join(storage_folder, percentage_idxs_folder[0], f)) and str(f).endswith(\n 'factor_idx.npz')]\n\n percentage_idxs_files = natsorted(percentage_idxs_files, key=lambda y: y.lower())\n\n t_values_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_values_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_edges_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_edges_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_lower_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float32)\n t_lower_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float32)\n t_upper_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float32)\n t_upper_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float32)\n t_lower_outliers_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_lower_outliers_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_upper_outliers_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_upper_outliers_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_percentages_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_percentages_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_rel_err_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_rel_err_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=object)\n t_err_mean_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float64)\n t_err_mean_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float64)\n t_err_median_0 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float64)\n t_err_median_1 = np.zeros(shape=(len(percentage_idxs_files), DatasetConfig.DATASET_LST_BANDS_USED,),\n dtype=np.float64)\n\n for i, idx_file in enumerate(percentage_idxs_files):\n path_to_idx = join(storage_folder, percentage_idxs_folder[0], idx_file)\n print('Processing idx file %s' % path_to_idx)\n\n iter_bigdata_idx_0 = iter_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_idx) as df:\n iter_bigdata_idx_0, iter_bigdata_idx_1 = item_getter(df)\n\n redises = []\n threads = list()\n band_analyzers = []\n\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n 
redises.append(redis_db)\n\n band_analyzer = BandAnalyzerThread(band, redis_db, bigdata, iter_bigdata_idx_0, iter_bigdata_idx_1,\n band, join(storage_folder, percentage_idxs_folder[0]),\n edges_0=edges_0, values_0=values_0, lower_0=lower_0,\n upper_0=upper_0, lower_outliers_0=lower_outliers_0,\n upper_outliers_0=upper_outliers_0, percentages_0=percentages_0,\n edges_1=edges_1, values_1=values_1, lower_1=lower_1,\n upper_1=upper_1, lower_outliers_1=lower_outliers_1,\n upper_outliers_1=upper_outliers_1, percentages_1=percentages_1)\n band_analyzers.append(band_analyzer)\n\n t = band_analyzer\n threads.append(t)\n t.start()\n\n all_thread_processed = False\n thrds_processed = [False for t_i in range(len(threads))]\n\n while not all_thread_processed:\n # progress_bar(redises, stdscr)\n\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8') == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder, percentage_idxs_folder[0],\n \"band_{:02d}_cls_{:02d}_histogram_info.npz\".format(thrd, 0))\n item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_0[i, thrd], t_edges_0[i, thrd], t_lower_0[i, thrd], t_upper_0[i, thrd], \\\n t_lower_outliers_0[i, thrd], t_upper_outliers_0[i, thrd], t_percentages_0[\n i, thrd] = item_getter(df)\n\n execution = subprocess.run(['rm', analysis_band_path])\n\n analysis_band_path = join(storage_folder, percentage_idxs_folder[0],\n \"band_{:02d}_cls_{:02d}_histogram_err.npz\".format(thrd, 0))\n item_getter = itemgetter('rel_err', 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_0[i, thrd], t_err_mean_0[i, thrd], t_err_median_0[i, thrd] = item_getter(\n df)\n\n execution = subprocess.run(['rm', analysis_band_path])\n\n analysis_band_path = join(storage_folder, percentage_idxs_folder[0],\n \"band_{:02d}_cls_{:02d}_histogram_info.npz\".format(thrd, 1))\n item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper',\n 'h_lower_outliers', 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n t_values_1[i, thrd], t_edges_1[i, thrd], t_lower_1[i, thrd], t_upper_1[i, thrd], \\\n t_lower_outliers_1[i, thrd], t_upper_outliers_1[i, thrd], t_percentages_1[\n i, thrd] = item_getter(df)\n\n execution = subprocess.run(['rm', analysis_band_path])\n\n analysis_band_path = join(storage_folder, percentage_idxs_folder[0],\n \"band_{:02d}_cls_{:02d}_histogram_err.npz\".format(thrd, 1))\n item_getter = itemgetter('rel_err', 'err_mean', 'err_median')\n with np.load(analysis_band_path) as df:\n t_rel_err_1[i, thrd], t_err_mean_1[i, thrd], t_err_median_1[i, thrd] = item_getter(\n df)\n\n execution = subprocess.run(['rm', analysis_band_path])\n\n thrds_processed[thrd] = True\n\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n\n if not all_thread_processed:\n time.sleep(1)\n\n analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n \"{:02d}_histogram_info.npz\".format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=t_values_0, values_1=t_values_1, edges_0=t_edges_0,\n edges_1=t_edges_1, lower_0=t_lower_0, lower_1=t_lower_1, upper_0=t_upper_0,\n upper_1=t_upper_1, lower_outliers_0=t_lower_outliers_0,\n lower_outliers_1=t_lower_outliers_1, upper_outliers_0=t_upper_outliers_0,\n 
upper_outliers_1=t_upper_outliers_1, percentages_0=t_percentages_0,\n percentages_1=t_percentages_1)\n analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n \"{:02d}_histogram_err.npz\".format(percentage))\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, rel_err_0=t_rel_err_0, rel_err_1=t_rel_err_1,\n err_mean_0=t_err_mean_0, err_mean_1=t_err_mean_1, err_median_0=t_err_median_0,\n err_median_1=t_err_median_1)\n\n print('Done!')\n\n\ndef full_dataset_analyzer(dataset_folder, storage_folder, tactic):\n print('Retrieving datasets...')\n\n rasters_folders = [f for f in listdir(dataset_folder) if not isfile(join(dataset_folder, f))]\n\n rasters_folders = natsorted(rasters_folders, key=lambda y: y.lower())\n\n bigdata = np.zeros(shape=(len(rasters_folders), DatasetConfig.DATASET_LST_BANDS_USED, RasterParams.SRTM_MAX_X,\n RasterParams.SRTM_MAX_Y), dtype=np.float32)\n\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'dataset.npz')\n\n print('Loading dataset folder ', pck)\n\n pck_bigdata = None\n item_getter = itemgetter('bigdata')\n with np.load(path_to_pck) as df:\n pck_bigdata = item_getter(df)\n\n bigdata[i] = pck_bigdata\n\n del pck_bigdata\n\n gc.collect()\n\n print('Checking number of indexes...')\n cnt_idx_0 = 0\n cnt_idx_1 = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n pck_data_idx_0 = pck_data_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_data_idx_0, pck_data_idx_1 = item_getter(df)\n\n cnt_idx_0 += pck_data_idx_0.shape[0]\n cnt_idx_1 += pck_data_idx_1.shape[0]\n\n forest_dominance = False if cnt_idx_0 > cnt_idx_1 else True\n\n class_total = 0\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n class_total = cnt_idx_0\n else:\n class_total = cnt_idx_1\n else:\n if not forest_dominance:\n class_total = cnt_idx_1\n else:\n class_total = cnt_idx_0\n\n # Retrieving all indexes from the different zones and putting them in memory\n\n bigdata_idx_0 = np.empty(shape=(cnt_idx_0 + 1, 3), dtype=np.uint16)\n bigdata_idx_1 = np.empty(shape=(cnt_idx_1 + 1, 3), dtype=np.uint16)\n\n print('Number of indexes for No Forest: %s' % (str(len(bigdata_idx_0))))\n print('Number of indexes for Forest: %s' % (str(len(bigdata_idx_1))))\n\n print('Copying and appending index values...')\n\n current_0_idx = 0\n current_1_idx = 0\n for i, pck in enumerate(rasters_folders):\n path_to_pck = join(dataset_folder, pck, 'idxs.npz')\n\n pck_bigdata_idx_0 = pck_bigdata_idx_1 = None\n item_getter = itemgetter('bigdata_idx_0', 'bigdata_idx_1')\n with np.load(path_to_pck) as df:\n pck_bigdata_idx_0, pck_bigdata_idx_1 = item_getter(df)\n\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 1:] = pck_bigdata_idx_0\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 1:] = pck_bigdata_idx_1\n bigdata_idx_0[current_0_idx:current_0_idx + len(pck_bigdata_idx_0), 0] = i\n bigdata_idx_1[current_1_idx:current_1_idx + len(pck_bigdata_idx_1), 0] = i\n\n current_0_idx += len(pck_bigdata_idx_0)\n current_1_idx += len(pck_bigdata_idx_1)\n\n upsample_required = False\n if tactic == TACTIC_UPSAMPLE:\n if not forest_dominance:\n if class_total > cnt_idx_1:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_1\n else:\n if class_total > cnt_idx_0:\n upsample_required = True\n upsample_amount = class_total - cnt_idx_0\n\n if upsample_required:\n if not forest_dominance:\n 
if upsample_amount / cnt_idx_1 > 1:\n repetitions = int(upsample_amount / cnt_idx_1)\n print('Upsampling Forest indexes %s times' % (str(repetitions)))\n\n if repetitions > 0:\n bigdata_idx_1 = bigdata_idx_1.repeat(repetitions, axis=0)\n\n left_to_complete = upsample_amount % cnt_idx_1\n\n c_bigdata_idx_1 = bigdata_idx_1.copy()\n np.random.shuffle(c_bigdata_idx_1)\n\n if left_to_complete > 0:\n bigdata_idx_1 = np.append(bigdata_idx_1, c_bigdata_idx_1[:left_to_complete], axis=0)\n else:\n if upsample_amount / cnt_idx_0 > 1:\n repetitions = int(upsample_amount / cnt_idx_0)\n print('Upsampling No Forest indexes %s times' % (str(repetitions)))\n\n if repetitions > 0:\n bigdata_idx_0 = bigdata_idx_0.repeat(repetitions, axis=0)\n\n left_to_complete = upsample_amount % cnt_idx_0\n\n c_bigdata_idx_0 = bigdata_idx_0.copy()\n np.random.shuffle(c_bigdata_idx_0)\n\n if left_to_complete > 0:\n bigdata_idx_0 = np.append(bigdata_idx_0, c_bigdata_idx_0[:left_to_complete], axis=0)\n\n analysis_idx_path = join(storage_folder,\n \"full_{:}_samples_idx.npz\".format(tactic))\n print('Storing data: ' + analysis_idx_path)\n np.savez_compressed(analysis_idx_path, bigdata_idx_0=bigdata_idx_0, bigdata_idx_1=bigdata_idx_1)\n\n print('Procesing full dataset distribution sampled index files...\\n')\n\n '''partition_range = 2.0 / partitions\n\n bigdata = np.divide(np.add(bigdata, 1.0), partition_range)\n gc.collect()\n bigdata = bigdata.astype(np.uint32)'''\n\n values_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n values_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n edges_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n edges_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n lower_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)\n lower_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)\n upper_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)\n upper_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=np.float32)\n lower_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n lower_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n upper_outliers_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n upper_outliers_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n percentages_0 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n percentages_1 = np.zeros(shape=(DatasetConfig.DATASET_LST_BANDS_USED,), dtype=object)\n\n redises = []\n threads = list()\n band_analyzers = []\n\n for band in range(DatasetConfig.DATASET_LST_BANDS_USED):\n redis_db = redis.Redis(db=band)\n redis_db.delete('status')\n redises.append(redis_db)\n\n band_analyzer = BandAnalyzerThread(band, redis_db, bigdata, bigdata_idx_0, bigdata_idx_1, band, storage_folder)\n band_analyzers.append(band_analyzer)\n\n t = band_analyzer\n threads.append(t)\n t.start()\n\n all_thread_processed = False\n thrds_processed = [False for t_i in range(len(threads))]\n\n while not all_thread_processed:\n # progress_bar(redises, stdscr)\n\n for thrd in range(len(threads)):\n if redises[thrd].get('status').decode('utf-8') == SampleSelectorThreadStatus.STATUS_DONE:\n if not thrds_processed[thrd]:\n analysis_band_path = join(storage_folder,\n \"band_{:02d}_cls_{:02d}_histogram_info.npz\".format(thrd, 0))\n item_getter = 
itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_0[thrd], edges_0[thrd], lower_0[thrd], upper_0[thrd], lower_outliers_0[thrd], \\\n upper_outliers_0[thrd], percentages_0[thrd] = item_getter(df)\n\n execution = subprocess.run(['rm', analysis_band_path])\n\n analysis_band_path = join(storage_folder,\n \"band_{:02d}_cls_{:02d}_histogram_info.npz\".format(thrd, 1))\n item_getter = itemgetter('h_values', 'h_edges', 'h_lower', 'h_upper', 'h_lower_outliers',\n 'h_upper_outliers', 'h_percentages')\n with np.load(analysis_band_path) as df:\n values_1[thrd], edges_1[thrd], lower_1[thrd], upper_1[thrd], lower_outliers_1[thrd], \\\n upper_outliers_1[thrd], percentages_1[thrd] = item_getter(df)\n\n execution = subprocess.run(['rm', analysis_band_path])\n\n thrds_processed[thrd] = True\n\n all_thread_processed = True\n for elem in thrds_processed:\n if not elem:\n all_thread_processed = False\n\n if not all_thread_processed:\n time.sleep(1)\n\n analysis_cntr_path = join(storage_folder, \"full_histogram_info.npz\")\n print('Storing data: ' + analysis_cntr_path)\n np.savez_compressed(analysis_cntr_path, values_0=values_0, values_1=values_1, edges_0=edges_0, edges_1=edges_1,\n lower_0=lower_0, lower_1=lower_1, upper_0=upper_0, upper_1=upper_1,\n lower_outliers_0=lower_outliers_0, lower_outliers_1=lower_outliers_1,\n upper_outliers_0=upper_outliers_0, upper_outliers_1=upper_outliers_1,\n percentages_0=percentages_0, percentages_1=percentages_1)\n\n\ndef analysis_summarizer(storage_folder, beginning, ending, jump):\n for percentage in range(beginning, ending, jump):\n print('Starting with percentage %d' % percentage)\n percentage_idxs_folder = [d for d in listdir(storage_folder) if\n not isfile(join(storage_folder, d)) and str(d).endswith(\"{:02d}p\".format(percentage))]\n\n analysis_cntr_path = join(storage_folder, percentage_idxs_folder[0],\n \"{:02d}_histogram_err.npz\".format(percentage))\n print('Retrieving data: ' + analysis_cntr_path)\n\n err_mean_0 = err_mean_1 = err_median_0 = err_median_1 = None\n item_getter = itemgetter('err_mean_0', 'err_mean_1', 'err_median_0', 'err_median_1')\n with np.load(analysis_cntr_path) as df:\n err_mean_0, err_mean_1, err_median_0, err_median_1 = item_getter(df)\n\n '''print('Mean errors of different samples for class 0:\\n', err_mean_0)\n print('Mean errors of different samples for class 1:\\n', err_mean_1)\n print('Median errors of different samples for class 0:\\n', err_median_0)\n print('Median errors of different samples for class 1:\\n', err_median_0)'''\n mean_err_samples = [np.mean(err_mean_0[:, f]) for f in range(err_mean_0.shape[1])]\n print('Mean error for percentage %d%%, class %02d:\\n' % (percentage, 0),\n np.array2string(np.array(mean_err_samples), separator='%; ',\n formatter={'float_kind': lambda x: \"%.03f\" % x}, max_line_width=sys.maxsize).strip(\n '[').replace(']', '%'))\n mean_err_samples = [np.mean(err_mean_1[:, f]) for f in range(err_mean_1.shape[1])]\n print('Mean error for percentage %d%%, class %02d:\\n' % (percentage, 1),\n np.array2string(np.array(mean_err_samples), separator='%; ',\n formatter={'float_kind': lambda x: \"%.03f\" % x}, max_line_width=sys.maxsize).strip(\n '[').replace(']', '%'))\n median_err_samples = [np.median(err_median_0[:, f]) for f in range(err_median_0.shape[1])]\n print('Median error for percentage %d%%, class %02d:\\n' % (percentage, 0),\n np.array2string(np.array(median_err_samples), separator='%; 
',\n formatter={'float_kind': lambda x: \"%.03f\" % x}, max_line_width=sys.maxsize).strip(\n '[').replace(']', '%'))\n median_err_samples = [np.median(err_median_1[:, f]) for f in range(err_median_1.shape[1])]\n print('Median error for percentage %d%%, class %02d:\\n' % (percentage, 1),\n np.array2string(np.array(median_err_samples), separator='%; ',\n formatter={'float_kind': lambda x: \"%.03f\" % x}, max_line_width=sys.maxsize).strip(\n '[').replace(']', '%'), '\\n\\n')\n\n\nmain(sys.argv[1:])\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
@app.before_first_request
def create_tables():
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.init_app(app)
<|reserved_special_token_0|>
@app.before_first_request
def create_tables():
pass
add_module_gamma(api)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PROPAGATE_EXCEPTIONS'] = True
api = Api(app)
db.init_app(app)
migrate = Migrate(app, db)
@app.before_first_request
def create_tables():
pass
add_module_gamma(api)
<|reserved_special_token_1|>
from os import environ
from flask import Flask
from flask_restful import Api
from flask_migrate import Migrate
from applications.db import db
from applications.gamma_api import add_module_gamma
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PROPAGATE_EXCEPTIONS'] = True
api = Api(app)
db.init_app(app)
migrate = Migrate(app, db)
@app.before_first_request
def create_tables():
pass
add_module_gamma(api)
<|reserved_special_token_1|>
from os import environ
from flask import Flask
from flask_restful import Api
from flask_migrate import Migrate
from applications.db import db
from applications.gamma_api import add_module_gamma
app = Flask(__name__)
app.config["DEBUG"] = True
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["PROPAGATE_EXCEPTIONS"] = True
api = Api(app)
db.init_app(app)
migrate = Migrate(app, db)
@app.before_first_request
def create_tables():
pass
# db.create_all()
add_module_gamma(api)
|
flexible
|
{
"blob_id": "fbb081fd52b14336ab4537bb795105bcd6a03070",
"index": 3045,
"step-1": "<mask token>\n\n\[email protected]_first_request\ndef create_tables():\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\ndb.init_app(app)\n<mask token>\n\n\[email protected]_first_request\ndef create_tables():\n pass\n\n\nadd_module_gamma(api)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['PROPAGATE_EXCEPTIONS'] = True\napi = Api(app)\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\n\[email protected]_first_request\ndef create_tables():\n pass\n\n\nadd_module_gamma(api)\n",
"step-4": "from os import environ\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_migrate import Migrate\nfrom applications.db import db\nfrom applications.gamma_api import add_module_gamma\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['PROPAGATE_EXCEPTIONS'] = True\napi = Api(app)\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\n\[email protected]_first_request\ndef create_tables():\n pass\n\n\nadd_module_gamma(api)\n",
"step-5": "from os import environ\n\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_migrate import Migrate\n\nfrom applications.db import db\nfrom applications.gamma_api import add_module_gamma\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config[\"PROPAGATE_EXCEPTIONS\"] = True\n\napi = Api(app)\n\ndb.init_app(app)\n\nmigrate = Migrate(app, db)\n\n\[email protected]_first_request\ndef create_tables():\n pass\n # db.create_all()\n\n\nadd_module_gamma(api)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding:utf-8 -*-
'''
Created on April 8, 2016
@author: liping
'''
import sys
from PyQt4 import QtGui,QtCore
class QuitButton(QtGui.QWidget):
def __init__(self,parent = None):
QtGui.QWidget.__init__(self,parent)
self.setGeometry(300,300,250,150)
self.setWindowTitle('quitButton')
quit = QtGui.QPushButton('Close',self)
quit.setGeometry(100,100,60,35)
self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,QtCore.SLOT('quit()'))
app = QtGui.QApplication(sys.argv)
qb = QuitButton()
qb.show()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "5a3431b79b8f42b3042bb27d787d0d92891a7415",
"index": 3947,
"step-1": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\n<mask token>\nqb.show()\nsys.exit(app.exec_())\n",
"step-3": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())\n",
"step-4": "<mask token>\nimport sys\nfrom PyQt4 import QtGui, QtCore\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())\n",
"step-5": "# -*- coding:utf-8 -*-\n'''\nCreated on 2016��4��8��\n\n@author: liping\n'''\n\nimport sys\nfrom PyQt4 import QtGui,QtCore\n\nclass QuitButton(QtGui.QWidget):\n def __init__(self,parent = None):\n QtGui.QWidget.__init__(self,parent)\n \n self.setGeometry(300,300,250,150)\n self.setWindowTitle('quitButton')\n \n quit = QtGui.QPushButton('Close',self)\n quit.setGeometry(100,100,60,35)\n \n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,QtCore.SLOT('quit()'))\n \napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Pastebin(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pastebin(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pastebin(models.Model):
name = models.CharField(max_length=30)
textpaste = models.CharField(max_length=80)
pasteurl = models.AutoField(primary_key=True)
def __str__(self):
return self.name
<|reserved_special_token_1|>
from django.db import models
class Pastebin(models.Model):
name = models.CharField(max_length=30)
textpaste = models.CharField(max_length=80)
pasteurl = models.AutoField(primary_key=True)
def __str__(self):
return self.name
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Pastebin(models.Model):
name= models.CharField(max_length=30)
textpaste = models.CharField(max_length=80)
pasteurl = models.AutoField(primary_key=True)
def __str__(self):
return self.name
|
flexible
|
{
"blob_id": "3badf65a5301cc9cf26811e3989631aec5d31910",
"index": 2709,
"step-1": "<mask token>\n\n\nclass Pastebin(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Pastebin(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Pastebin(models.Model):\n name = models.CharField(max_length=30)\n textpaste = models.CharField(max_length=80)\n pasteurl = models.AutoField(primary_key=True)\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Pastebin(models.Model):\n name = models.CharField(max_length=30)\n textpaste = models.CharField(max_length=80)\n pasteurl = models.AutoField(primary_key=True)\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Pastebin(models.Model):\n\tname= models.CharField(max_length=30)\n\ttextpaste = models.CharField(max_length=80)\n\tpasteurl = models.AutoField(primary_key=True)\n\t\n\n\tdef __str__(self):\n\t\treturn self.name\n\t",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class LabeledEntry(sw.LabeledWidget):
<|reserved_special_token_0|>
@property
def value(self):
return self.get()
<|reserved_special_token_0|>
def show(self, *args):
"""Show only the specified subwidgets.
'all' or no arguments reverts to showing all"""
super().show(*args)
show_all = len(args) == 0 or args[0] == 'all'
if show_all or 'entry' in args:
self.entry.grid(row=0, column=0, sticky=tk.EW)
else:
self.entry.grid_forget()
<|reserved_special_token_0|>
def get(self):
"""return the current value"""
value = self.entry.get()
return value
def config(self, **kwargs):
"""Set the configuration of the megawidget"""
entry = options['entry']
keys = [*kwargs.keys()]
for k in keys:
if k in entry:
v = kwargs.pop(k)
self.entry.config(**{entry[k]: v})
super().config(**kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LabeledEntry(sw.LabeledWidget):
<|reserved_special_token_0|>
@property
def value(self):
return self.get()
<|reserved_special_token_0|>
def show(self, *args):
"""Show only the specified subwidgets.
'all' or no arguments reverts to showing all"""
super().show(*args)
show_all = len(args) == 0 or args[0] == 'all'
if show_all or 'entry' in args:
self.entry.grid(row=0, column=0, sticky=tk.EW)
else:
self.entry.grid_forget()
def set(self, value):
"""Set the value of the entry widget"""
self.entry.delete(0, tk.END)
if value is None:
return
self.entry.insert(0, value)
def get(self):
"""return the current value"""
value = self.entry.get()
return value
def config(self, **kwargs):
"""Set the configuration of the megawidget"""
entry = options['entry']
keys = [*kwargs.keys()]
for k in keys:
if k in entry:
v = kwargs.pop(k)
self.entry.config(**{entry[k]: v})
super().config(**kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LabeledEntry(sw.LabeledWidget):
def __init__(self, parent, *args, **kwargs):
"""Initialize the instance"""
class_ = kwargs.pop('class_', 'MLabeledEntry')
super().__init__(parent, class_=class_)
interior = self.interior
justify = kwargs.pop('justify', tk.LEFT)
entrywidth = kwargs.pop('width', 15)
self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)
self.entry.grid(row=0, column=0, sticky=tk.EW)
self.interior = ttk.Frame(interior)
self.interior.grid(row=0, column=1, sticky=tk.NSEW)
interior.columnconfigure(0, weight=1)
self.config(**kwargs)
@property
def value(self):
return self.get()
@value.setter
def value(self, value):
self.set(value)
def show(self, *args):
"""Show only the specified subwidgets.
'all' or no arguments reverts to showing all"""
super().show(*args)
show_all = len(args) == 0 or args[0] == 'all'
if show_all or 'entry' in args:
self.entry.grid(row=0, column=0, sticky=tk.EW)
else:
self.entry.grid_forget()
def set(self, value):
"""Set the value of the entry widget"""
self.entry.delete(0, tk.END)
if value is None:
return
self.entry.insert(0, value)
def get(self):
"""return the current value"""
value = self.entry.get()
return value
def config(self, **kwargs):
"""Set the configuration of the megawidget"""
entry = options['entry']
keys = [*kwargs.keys()]
for k in keys:
if k in entry:
v = kwargs.pop(k)
self.entry.config(**{entry[k]: v})
super().config(**kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
options = {'entry': {'class_': 'class_', 'cursor': 'cursor',
'exportselection': 'exportselection', 'font': 'font', 'invalidcommand':
'invalidcommand', 'justify': 'justify', 'show': 'show', 'style':
'style', 'takefocus': 'takefocus', 'variable': 'textvariable',
'validate': 'validate', 'validatecommand': 'validatecommand', 'width':
'width', 'xscrollcommand': 'xscrollcommand'}}
class LabeledEntry(sw.LabeledWidget):
def __init__(self, parent, *args, **kwargs):
"""Initialize the instance"""
class_ = kwargs.pop('class_', 'MLabeledEntry')
super().__init__(parent, class_=class_)
interior = self.interior
justify = kwargs.pop('justify', tk.LEFT)
entrywidth = kwargs.pop('width', 15)
self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)
self.entry.grid(row=0, column=0, sticky=tk.EW)
self.interior = ttk.Frame(interior)
self.interior.grid(row=0, column=1, sticky=tk.NSEW)
interior.columnconfigure(0, weight=1)
self.config(**kwargs)
@property
def value(self):
return self.get()
@value.setter
def value(self, value):
self.set(value)
def show(self, *args):
"""Show only the specified subwidgets.
'all' or no arguments reverts to showing all"""
super().show(*args)
show_all = len(args) == 0 or args[0] == 'all'
if show_all or 'entry' in args:
self.entry.grid(row=0, column=0, sticky=tk.EW)
else:
self.entry.grid_forget()
def set(self, value):
"""Set the value of the entry widget"""
self.entry.delete(0, tk.END)
if value is None:
return
self.entry.insert(0, value)
def get(self):
"""return the current value"""
value = self.entry.get()
return value
def config(self, **kwargs):
"""Set the configuration of the megawidget"""
entry = options['entry']
keys = [*kwargs.keys()]
for k in keys:
if k in entry:
v = kwargs.pop(k)
self.entry.config(**{entry[k]: v})
super().config(**kwargs)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Labeled entry widget.
The goal of these widgets is twofold: to make it easier for developers
to implement dialogs with compound widgets, and to naturally
standardize the user interface presented to the user.
"""
import logging
import seamm_widgets as sw
import tkinter as tk
import tkinter.ttk as ttk
logger = logging.getLogger(__name__)
options = {
"entry": {
"class_": "class_",
"cursor": "cursor",
"exportselection": "exportselection",
"font": "font",
"invalidcommand": "invalidcommand",
"justify": "justify",
"show": "show",
"style": "style",
"takefocus": "takefocus",
"variable": "textvariable",
"validate": "validate",
"validatecommand": "validatecommand",
"width": "width",
"xscrollcommand": "xscrollcommand",
},
}
class LabeledEntry(sw.LabeledWidget):
def __init__(self, parent, *args, **kwargs):
"""Initialize the instance"""
class_ = kwargs.pop("class_", "MLabeledEntry")
super().__init__(parent, class_=class_)
interior = self.interior
# entry
justify = kwargs.pop("justify", tk.LEFT)
entrywidth = kwargs.pop("width", 15)
self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)
self.entry.grid(row=0, column=0, sticky=tk.EW)
# interior frame
self.interior = ttk.Frame(interior)
self.interior.grid(row=0, column=1, sticky=tk.NSEW)
interior.columnconfigure(0, weight=1)
self.config(**kwargs)
@property
def value(self):
return self.get()
@value.setter
def value(self, value):
self.set(value)
def show(self, *args):
"""Show only the specified subwidgets.
'all' or no arguments reverts to showing all"""
super().show(*args)
show_all = len(args) == 0 or args[0] == "all"
if show_all or "entry" in args:
self.entry.grid(row=0, column=0, sticky=tk.EW)
else:
self.entry.grid_forget()
def set(self, value):
"""Set the value of the entry widget"""
self.entry.delete(0, tk.END)
if value is None:
return
self.entry.insert(0, value)
def get(self):
"""return the current value"""
value = self.entry.get()
return value
def config(self, **kwargs):
"""Set the configuration of the megawidget"""
# our options that we deal with
entry = options["entry"]
# cannot modify kwargs while iterating over it...
keys = [*kwargs.keys()]
for k in keys:
if k in entry:
v = kwargs.pop(k)
self.entry.config(**{entry[k]: v})
# having removed our options, pass rest to parent
super().config(**kwargs)
|
flexible
|
{
"blob_id": "111186f1d45b9cf3bf9065c7fa83a8f3f796bbe1",
"index": 5841,
"step-1": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n <mask token>\n\n @property\n def value(self):\n return self.get()\n <mask token>\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n <mask token>\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-2": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n <mask token>\n\n @property\n def value(self):\n return self.get()\n <mask token>\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-3": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop('class_', 'MLabeledEntry')\n super().__init__(parent, class_=class_)\n interior = self.interior\n justify = kwargs.pop('justify', tk.LEFT)\n entrywidth = kwargs.pop('width', 15)\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n interior.columnconfigure(0, weight=1)\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-4": "<mask token>\nlogger = logging.getLogger(__name__)\noptions = {'entry': {'class_': 'class_', 'cursor': 'cursor',\n 'exportselection': 'exportselection', 'font': 'font', 'invalidcommand':\n 'invalidcommand', 'justify': 'justify', 'show': 'show', 'style':\n 'style', 'takefocus': 'takefocus', 'variable': 'textvariable',\n 'validate': 'validate', 'validatecommand': 'validatecommand', 'width':\n 'width', 'xscrollcommand': 'xscrollcommand'}}\n\n\nclass LabeledEntry(sw.LabeledWidget):\n\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop('class_', 'MLabeledEntry')\n super().__init__(parent, class_=class_)\n interior = self.interior\n justify = kwargs.pop('justify', tk.LEFT)\n entrywidth = kwargs.pop('width', 15)\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n interior.columnconfigure(0, weight=1)\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"Labeled entry widget.\n\nThe goal of these widgets is twofold: to make it easier for developers\nto implement dialogs with compound widgets, and to naturally\nstandardize the user interface presented to the user.\n\"\"\"\n\nimport logging\nimport seamm_widgets as sw\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nlogger = logging.getLogger(__name__)\n\noptions = {\n \"entry\": {\n \"class_\": \"class_\",\n \"cursor\": \"cursor\",\n \"exportselection\": \"exportselection\",\n \"font\": \"font\",\n \"invalidcommand\": \"invalidcommand\",\n \"justify\": \"justify\",\n \"show\": \"show\",\n \"style\": \"style\",\n \"takefocus\": \"takefocus\",\n \"variable\": \"textvariable\",\n \"validate\": \"validate\",\n \"validatecommand\": \"validatecommand\",\n \"width\": \"width\",\n \"xscrollcommand\": \"xscrollcommand\",\n },\n}\n\n\nclass LabeledEntry(sw.LabeledWidget):\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop(\"class_\", \"MLabeledEntry\")\n super().__init__(parent, class_=class_)\n\n interior = self.interior\n\n # entry\n justify = kwargs.pop(\"justify\", tk.LEFT)\n entrywidth = kwargs.pop(\"width\", 15)\n\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n\n # interior frame\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n\n interior.columnconfigure(0, weight=1)\n\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n\n super().show(*args)\n\n show_all = len(args) == 0 or args[0] == \"all\"\n\n if show_all or \"entry\" in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n\n self.entry.delete(0, tk.END)\n if value is None:\n return\n\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n\n # our options that we deal with\n entry = options[\"entry\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)\n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
class Client:
def __init__(self, id, color):
self.id = id
self.arrival = {}
self.leave = {}
self.server = {}
self.queue = 0
self.served = 0
self.color = color
def set_arrival(self, arrival):
self.arrival[self.queue] = arrival
def set_leave(self, leave):
self.leave[self.queue] = leave
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def set_served(self, served):
self.served = served
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Client:
def __init__(self, id, color):
self.id = id
self.arrival = {}
self.leave = {}
self.server = {}
self.queue = 0
self.served = 0
self.color = color
def set_arrival(self, arrival):
self.arrival[self.queue] = arrival
def set_leave(self, leave):
self.leave[self.queue] = leave
def set_server(self, server):
self.server[self.queue] = server
<|reserved_special_token_0|>
def set_served(self, served):
self.served = served
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Client:
def __init__(self, id, color):
self.id = id
self.arrival = {}
self.leave = {}
self.server = {}
self.queue = 0
self.served = 0
self.color = color
def set_arrival(self, arrival):
self.arrival[self.queue] = arrival
def set_leave(self, leave):
self.leave[self.queue] = leave
def set_server(self, server):
self.server[self.queue] = server
def set_queue(self, queue):
self.queue = queue
def set_served(self, served):
self.served = served
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Client:
def __init__(self, id, color):
self.id = id
self.arrival = {}
self.leave = {}
self.server = {}
self.queue = 0
self.served = 0
self.color = color
def set_arrival(self, arrival):
self.arrival[self.queue] = arrival
def set_leave(self, leave):
self.leave[self.queue] = leave
def set_server(self, server):
self.server[self.queue] = server
def set_queue(self, queue):
self.queue = queue
def set_served(self, served):
self.served = served
def wait(self, queue):
return self.leave[queue] - self.arrival[queue]
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
# Class with the data of a client that enters the simulated system.
class Client:
def __init__(self, id, color):
        # Client identifier, used for the correctness test.
self.id = id
        # Arrival time at the server (queue 1 and queue 2)
self.arrival = {}
        # Departure time from the server (queue 1 and queue 2)
self.leave = {}
        # Time spent at the server (queue 1 and queue 2)
self.server = {}
        # Indicator of which queue the client is currently in
self.queue = 0
        # Indicator of whether the client has already been served and left the system
self.served = 0
        # Client color (TRANSIENT and EQUILIBRIUM)
self.color = color
def set_arrival(self, arrival):
self.arrival[self.queue] = arrival
def set_leave(self, leave):
self.leave[self.queue] = leave
def set_server(self, server):
self.server[self.queue] = server
def set_queue(self, queue):
self.queue = queue
def set_served(self, served):
self.served = served
    # Waiting time in the queue = time of leaving the queue for the server - time of arrival at the queue.
def wait(self, queue):
return (self.leave[queue] - self.arrival[queue])
|
flexible
|
{
"blob_id": "5dc201f743705d6a57dfb61ec2cc2a827db0ba25",
"index": 7234,
"step-1": "class Client:\n\n def __init__(self, id, color):\n self.id = id\n self.arrival = {}\n self.leave = {}\n self.server = {}\n self.queue = 0\n self.served = 0\n self.color = color\n\n def set_arrival(self, arrival):\n self.arrival[self.queue] = arrival\n\n def set_leave(self, leave):\n self.leave[self.queue] = leave\n <mask token>\n <mask token>\n\n def set_served(self, served):\n self.served = served\n <mask token>\n",
"step-2": "class Client:\n\n def __init__(self, id, color):\n self.id = id\n self.arrival = {}\n self.leave = {}\n self.server = {}\n self.queue = 0\n self.served = 0\n self.color = color\n\n def set_arrival(self, arrival):\n self.arrival[self.queue] = arrival\n\n def set_leave(self, leave):\n self.leave[self.queue] = leave\n\n def set_server(self, server):\n self.server[self.queue] = server\n <mask token>\n\n def set_served(self, served):\n self.served = served\n <mask token>\n",
"step-3": "class Client:\n\n def __init__(self, id, color):\n self.id = id\n self.arrival = {}\n self.leave = {}\n self.server = {}\n self.queue = 0\n self.served = 0\n self.color = color\n\n def set_arrival(self, arrival):\n self.arrival[self.queue] = arrival\n\n def set_leave(self, leave):\n self.leave[self.queue] = leave\n\n def set_server(self, server):\n self.server[self.queue] = server\n\n def set_queue(self, queue):\n self.queue = queue\n\n def set_served(self, served):\n self.served = served\n <mask token>\n",
"step-4": "class Client:\n\n def __init__(self, id, color):\n self.id = id\n self.arrival = {}\n self.leave = {}\n self.server = {}\n self.queue = 0\n self.served = 0\n self.color = color\n\n def set_arrival(self, arrival):\n self.arrival[self.queue] = arrival\n\n def set_leave(self, leave):\n self.leave[self.queue] = leave\n\n def set_server(self, server):\n self.server[self.queue] = server\n\n def set_queue(self, queue):\n self.queue = queue\n\n def set_served(self, served):\n self.served = served\n\n def wait(self, queue):\n return self.leave[queue] - self.arrival[queue]\n",
"step-5": "# -*- coding:utf-8 -*-\n# Classe com os dados de um cliente que entra no sistema simulado.\n\nclass Client:\n def __init__(self, id, color):\n # Identificador do cliente, usada para o teste de correção.\n self.id = id\n # Tempo de chegada ao servidor (fila 1 e fila 2)\n self.arrival = {}\n # Tempo de saída do servidor (fila 1 e fila 2)\n self.leave = {}\n # Tempo no servidor (fila 1 e fila 2)\n self.server = {}\n # Indicador que diz qual fila o cliente está no momento\n self.queue = 0\n # Indicador que diz se o cliente já foi servido e saiu do sistema\n self.served = 0\n # Cor do cliente (TRANSIENT e EQUILIBRIUM)\n self.color = color\n\n def set_arrival(self, arrival):\n self.arrival[self.queue] = arrival\n\n def set_leave(self, leave):\n self.leave[self.queue] = leave\n\n def set_server(self, server):\n self.server[self.queue] = server\n\n def set_queue(self, queue):\n self.queue = queue\n \n def set_served(self, served):\n self.served = served\n\n # Tempo de espera na fila = Tempo de saída da fila para o servidor - Tempo de chegada na fila.\n def wait(self, queue):\n return (self.leave[queue] - self.arrival[queue])\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# -*- coding:utf-8 -*-
import math
r = float(input())
print("{0:f} {1:f}".format(r*r*math.pi,2*r*math.pi))
|
normal
|
{
"blob_id": "e28cca2273e1c3ad4b8a955843e7dfb45c00694c",
"index": 3246,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))\n",
"step-3": "<mask token>\nr = float(input())\nprint('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))\n",
"step-4": "import math\nr = float(input())\nprint('{0:f} {1:f}'.format(r * r * math.pi, 2 * r * math.pi))\n",
"step-5": "# -*- coding:utf-8 -*-\nimport math\nr = float(input())\nprint(\"{0:f} {1:f}\".format(r*r*math.pi,2*r*math.pi))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
soup.find('td', {'style': 'color:red'}).text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'
}
list_req = requests.post(url, headers=headers)
soup = BeautifulSoup(list_req.content, 'html.parser')
soup.find('td', {'style': 'color:red'}).text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import requests
from bs4 import BeautifulSoup
url = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'
}
list_req = requests.post(url, headers=headers)
soup = BeautifulSoup(list_req.content, 'html.parser')
soup.find('td', {'style': 'color:red'}).text
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 10:34:15 2021
@author: Ivan
Course material: Web Scraping King in Practice for Marketers Changing Careers | 5 major social platforms + 2 major e-commerce sites
Copyright belongs to 楊超霆; for any questions, contact [email protected]
Chapter 1: Basic crawler training
HTML crawler POST tutorial - Goodinfo (Taiwan stock market information site)
"""
import requests
from bs4 import BeautifulSoup
# The URL to scrape
url = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'
# These accompanying headers are required
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36' }
# Request the website
list_req = requests.post(url, headers=headers)
# Pull down the entire page's HTML
soup = BeautifulSoup(list_req.content, "html.parser")
# Extract the desired data
soup.find('td',{'style':'color:red'}).text
|
flexible
|
{
"blob_id": "a5918679b6e3a9bde54808264d9526c6a191578f",
"index": 7737,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsoup.find('td', {'style': 'color:red'}).text\n",
"step-3": "<mask token>\nurl = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n }\nlist_req = requests.post(url, headers=headers)\nsoup = BeautifulSoup(list_req.content, 'html.parser')\nsoup.find('td', {'style': 'color:red'}).text\n",
"step-4": "<mask token>\nimport requests\nfrom bs4 import BeautifulSoup\nurl = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n }\nlist_req = requests.post(url, headers=headers)\nsoup = BeautifulSoup(list_req.content, 'html.parser')\nsoup.find('td', {'style': 'color:red'}).text\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 27 10:34:15 2021\r\n\r\n@author: Ivan\r\n課程教材:行銷人轉職爬蟲王實戰|5大社群平台+2大電商\r\n版權屬於「楊超霆」所有,若有疑問,可聯絡[email protected]\r\n\r\n第一章 爬蟲基本訓練\r\nHtml爬蟲Post教學-台灣股市資訊網\r\n\"\"\"\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n# 要抓取的網址\r\nurl = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'\r\n# 附帶的資料必須要有\r\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36' }\r\n\r\n#請求網站\r\nlist_req = requests.post(url, headers=headers)\r\n#將整個網站的程式碼爬下來\r\nsoup = BeautifulSoup(list_req.content, \"html.parser\")\r\n#抓取想要的資料\r\nsoup.find('td',{'style':'color:red'}).text\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))
HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(
1).freq(4).currentStep(pool.currentIndex() * random(2, 25))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup():
global canvas, pool
size(568, 568)
H.init(this).background(4292927458)
smooth()
canvas = H.add(HCanvas()).autoClear(False).fade(5)
pool = HDrawablePool(numSquares)
pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()
).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -
20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(
Callback()).requestAll()
def draw():
H.drawStage()
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))
HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(
1).freq(4).currentStep(pool.currentIndex() * random(2, 25))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rectRadius = 50
numSquares = 25
canvas = None
pool = None
color1 = 1080765220
color2 = 3296924961
def setup():
global canvas, pool
size(568, 568)
H.init(this).background(4292927458)
smooth()
canvas = H.add(HCanvas()).autoClear(False).fade(5)
pool = HDrawablePool(numSquares)
pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()
).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -
20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(
Callback()).requestAll()
def draw():
H.drawStage()
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))
HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(
1).freq(4).currentStep(pool.currentIndex() * random(2, 25))
<|reserved_special_token_1|>
from hype.core.util import H
from hype.core.interfaces import HCallback
from hype.extended.behavior import HOscillator
from hype.extended.drawable import HCanvas, HRect
from hype.extended.layout import HGridLayout
from hype.extended.util import HDrawablePool
from random import choice
rectRadius = 50
numSquares = 25
canvas = None
pool = None
color1 = 1080765220
color2 = 3296924961
def setup():
global canvas, pool
size(568, 568)
H.init(this).background(4292927458)
smooth()
canvas = H.add(HCanvas()).autoClear(False).fade(5)
pool = HDrawablePool(numSquares)
pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()
).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -
20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(
Callback()).requestAll()
def draw():
H.drawStage()
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))
HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(
1).freq(4).currentStep(pool.currentIndex() * random(2, 25))
<|reserved_special_token_1|>
# PDE:
# add_library('hype')
# processing.py:
from hype.core.util import H
from hype.core.interfaces import HCallback
from hype.extended.behavior import HOscillator
from hype.extended.drawable import HCanvas, HRect
from hype.extended.layout import HGridLayout
from hype.extended.util import HDrawablePool
from random import choice
rectRadius = 50
numSquares = 25
canvas = None
pool = None
color1 = 0x406B2B24 # #6B2B24
color2 = 0xc4831521 # #831521
def setup():
global canvas, pool
size(568, 568)
H.init(this).background(0xffE0DFE2) # #E0DFE2
smooth()
canvas = H.add(HCanvas()).autoClear(False).fade(5)
pool = HDrawablePool(numSquares)
pool.autoParent(canvas)\
.add(HRect()
.size(rectRadius * 2)
.noStroke())\
.layout(HGridLayout()
.startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)
.spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)
.cols(5))\
.onCreate(Callback())\
.requestAll()
def draw():
H.drawStage()
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER)\
.fill(choice([color1, color2]))
HOscillator()\
.target(drawable)\
.property(H.ROTATION)\
.range(-5, 5)\
.speed(1)\
.freq(4)\
.currentStep(pool.currentIndex() * random(2, 25))
|
flexible
|
{
"blob_id": "b8a41c56a31acab0181ec364f76010ac12119074",
"index": 5489,
"step-1": "<mask token>\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-2": "<mask token>\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-3": "<mask token>\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 1080765220\ncolor2 = 3296924961\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-4": "from hype.core.util import H\nfrom hype.core.interfaces import HCallback\nfrom hype.extended.behavior import HOscillator\nfrom hype.extended.drawable import HCanvas, HRect\nfrom hype.extended.layout import HGridLayout\nfrom hype.extended.util import HDrawablePool\nfrom random import choice\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 1080765220\ncolor2 = 3296924961\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-5": "# PDE:\n# add_library('hype')\n# processing.py:\nfrom hype.core.util import H\nfrom hype.core.interfaces import HCallback\nfrom hype.extended.behavior import HOscillator\nfrom hype.extended.drawable import HCanvas, HRect\nfrom hype.extended.layout import HGridLayout\nfrom hype.extended.util import HDrawablePool\n\nfrom random import choice\n\n\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 0x406B2B24 # #6B2B24\ncolor2 = 0xc4831521 # #831521\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(0xffE0DFE2) # #E0DFE2\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas)\\\n .add(HRect()\n .size(rectRadius * 2)\n .noStroke())\\\n .layout(HGridLayout()\n .startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)\n .spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)\n .cols(5))\\\n .onCreate(Callback())\\\n .requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER)\\\n .fill(choice([color1, color2]))\n HOscillator()\\\n .target(drawable)\\\n .property(H.ROTATION)\\\n .range(-5, 5)\\\n .speed(1)\\\n .freq(4)\\\n .currentStep(pool.currentIndex() * random(2, 25))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#!/usr/bin/python3
'''
generator.py
    This program inputs a string and outputs the corresponding hex
Creator: Ethan Knight
Email: [email protected]
Published: 20181116
'''
import sys
import time
import binascii
def main():
print("\n", sys.version_info)
try:
while True:
print("\n\nPress Ctrl+C to exit.")
usr=test()
out=binascii.hexlify(bytes(usr, encoding="utf8"))
print("\nHex:\t\t", out)
print("Base 10:\t", int(out,16))
time.sleep(.5)
except KeyboardInterrupt:
print("\tProgram Terminated\n\n")
sys.exit(0)
def test():
while True:
usr=input("Enter the string to convert\n\n\t")
if usr!="":
return usr
else:
print("\nNo string entered.")
if __name__=="__main__":
main()
|
normal
|
{
"blob_id": "a52cbe6dbf4b4fc82d09e5f34e6e135933f3af38",
"index": 1418,
"step-1": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport time\nimport binascii\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n\n'''\n generator.py\n This program inputs a strings, and outputs the corresponding hex\n Creator: Ethan Knight\n Email: [email protected]\n Published: 20181116\n'''\n\nimport sys\nimport time\nimport binascii\n\ndef main():\n print(\"\\n\", sys.version_info)\n try:\n while True:\n print(\"\\n\\nPress Ctrl+C to exit.\")\n usr=test()\n out=binascii.hexlify(bytes(usr, encoding=\"utf8\"))\n print(\"\\nHex:\\t\\t\", out)\n print(\"Base 10:\\t\", int(out,16))\n time.sleep(.5)\n except KeyboardInterrupt:\n print(\"\\tProgram Terminated\\n\\n\")\n sys.exit(0)\n\ndef test():\n while True:\n usr=input(\"Enter the string to convert\\n\\n\\t\")\n if usr!=\"\":\n return usr\n else:\n print(\"\\nNo string entered.\")\n\nif __name__==\"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def square_root(n):
start = 1
end = n
if n == 0 or n == 1:
return n
while start <= end:
mid = (start + end) // 2
if mid * mid == n:
return mid
elif mid * mid < n:
start = mid + 1
else:
end = mid - 1
<|reserved_special_token_1|>
"""
This is a big integer challenge. You are given an integer which is a **perfect
square**. It is composed of 40 or more digits. Compose a function which will
find the exact square root of this integer.
### Examples
square_root(152415787532388367501905199875019052100) ➞ 12345678901234567890
square_root(10203040506070809101112131413121110090807060504030201) ➞ 101010101010101010101010101
### Notes
* All test cases are perfect squares.
* A **good fortune** bonus awaits you if you are able to complete this challenge without importing anything.
"""
def square_root(n):
start = 1
end = n
if n == 0 or n == 1:
return n
while start <= end:
mid = (start+end)//2
if mid*mid == n:
return mid
elif mid*mid < n:
start = mid+1
else:
end = mid-1
|
flexible
|
{
"blob_id": "f9b53df799b3e6b71282c84a625ea5915ccb8014",
"index": 1966,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef square_root(n):\n start = 1\n end = n\n if n == 0 or n == 1:\n return n\n while start <= end:\n mid = (start + end) // 2\n if mid * mid == n:\n return mid\n elif mid * mid < n:\n start = mid + 1\n else:\n end = mid - 1\n",
"step-3": "\"\"\"\r\n\n\nThis is a big integer challenge. You are given an integer which is a **perfect\nsquare**. It is composed of 40 or more digits. Compose a function which will\nfind the exact square root of this integer.\n\n### Examples\n\n square_root(152415787532388367501905199875019052100) ➞ 12345678901234567890\n \n square_root(10203040506070809101112131413121110090807060504030201) ➞ 101010101010101010101010101\n\n### Notes\n\n * All test cases are perfect squares.\n * A **good fortune** bonus awaits you if you are able to complete this challenge without importing anything.\n\n\"\"\"\r\n\ndef square_root(n):\n start = 1\n end = n\n if n == 0 or n == 1:\n return n\n while start <= end:\n mid = (start+end)//2\n if mid*mid == n:\n return mid\n elif mid*mid < n:\n start = mid+1\n else:\n end = mid-1\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from product.models import Item,Product,Category
# Register your models here.
admin.site.register(Category,MPTTModelAdmin)
admin.site.register(Item)
admin.site.register(Product)
|
normal
|
{
"blob_id": "fcd3e4c0d42649833e6c5ff6414c993654691d16",
"index": 188,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Category, MPTTModelAdmin)\nadmin.site.register(Item)\nadmin.site.register(Product)\n",
"step-3": "from django.contrib import admin\nfrom mptt.admin import MPTTModelAdmin\nfrom product.models import Item, Product, Category\nadmin.site.register(Category, MPTTModelAdmin)\nadmin.site.register(Item)\nadmin.site.register(Product)\n",
"step-4": "from django.contrib import admin\nfrom mptt.admin import MPTTModelAdmin\nfrom product.models import Item,Product,Category\n# Register your models here.\nadmin.site.register(Category,MPTTModelAdmin)\nadmin.site.register(Item)\nadmin.site.register(Product)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import csv, io
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.views import generic
from itertools import chain
from .models import Player, League, Team
class IndexView(generic.ListView):
template_name = 'players/players.html'
context_object_name = 'players'
def get_queryset(self):
return list(chain(Player.objects.all(), Player._meta.get_fields()))
def rate(stats):
sum = 0.
sum += float(stats[4]) / float(stats[3]) * 90 / 30
sum += float(stats[5]) / float(stats[3]) * 90 / 40
sum += float(stats[6]) / float(stats[3]) * 90 / 2
sum += float(stats[7]) / float(stats[3]) * 90 / 1
sum += float(stats[8]) / float(stats[3]) * 90 / 3
sum += float(stats[9]) / float(stats[3]) * 90 / 1.5
sum += float(stats[10]) / float(stats[3]) * 90 / 5
sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2
sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5
sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5
sum += float(stats[14]) / float(stats[3]) * 90 / 0.5
sum += float(stats[15]) / float(stats[3]) * 90 / 11
sum += float(stats[16]) / float(stats[3]) * 90 / 4
sum += float(stats[17]) / float(stats[3]) * 90 / 1
sum += float(stats[18]) / float(stats[3]) * 90 / 2
sum += float(stats[19]) / float(stats[3]) * 90 / 1
sum += float(stats[20]) / float(stats[3]) * 90 / 1
sum += float(stats[21]) / float(stats[3]) * 90 / 1
sum += float(stats[22]) / float(stats[3]) * 90 / 2.5
sum += float(stats[23]) / float(stats[3]) * 90 / 1
sum += float(stats[24]) / float(stats[3]) * 90 / 2
sum += float(stats[25]) / float(stats[3]) * 90 / 1
sum += float(stats[26]) / float(stats[3]) * 90 / 5
sum += float(stats[27]) / float(stats[3]) * 90 / 0.5
sum += float(stats[28]) / float(stats[3]) * 90 / 10
return sum
@permission_required('admin.can_addlog_entry')
def player_upload(request):
template = 'players/player_upload.html'
prompt = {
'order': ''
}
if request.method == "GET":
return render(request, template, prompt)
csv_file = request.FILES['file']
if not csv_file.name.endswith('.csv'):
messages.error(request, 'This is not a csv file')
data_set = csv_file.read().decode('UTF-8')
io_string = io.StringIO(data_set)
next(io_string)
for column in csv.reader(io_string, delimiter=':', quotechar='|'):
for i, stat in enumerate(column):
if i in [0, 1]:
continue
column[i] = column[i].replace('Åš', 'Ś')
column[i] = column[i].replace(',', '.')
column[i] = column[i].replace('km', '')
column[i] = column[i].replace('Â\xa0', '')
column[i] = column[i].replace('-', '0')
if int(column[3]) < 180:
continue
if column[32] == '0':
continue
if not League.objects.filter(name=column[32]):
League.objects.update_or_create(
name=column[32]
)
if not Team.objects.filter(name=column[31]):
Team.objects.update_or_create(
league_id=League.objects.filter(name=column[32])[0].id,
name=column[31]
)
_, created = Player.objects.update_or_create(
team_id=2,
name=column[0],
age=column[2],
position=column[1],
minutes=column[3],
accurate_passes=float(column[4])/float(column[3])*90,
passes=float(column[5])/float(column[3])*90,
created_situations=float(column[6])/float(column[3])*90,
key_passes=float(column[7])/float(column[3])*90,
dribble=float(column[8])/float(column[3])*90,
fouls_on=float(column[9])/float(column[3])*90,
offsides=float(column[10])/float(column[3])*90,
mistakes=float(column[11])/float(column[3])*90,
culpable_goals=float(column[12])/float(column[3])*90,
accurate_cross=float(column[13])/float(column[3])*90,
assists=float(column[14])/float(column[3])*90,
heads=float(column[15])/float(column[3])*90,
tackles=float(column[16])/float(column[3])*90,
key_heads=float(column[17])/float(column[3])*90,
interceptions=float(column[18])/float(column[3])*90,
catch_saves=float(column[19])/float(column[3])*90,
saves=float(column[20])/float(column[3])*90,
saves_on_corner=float(column[21])/float(column[3])*90,
complete_tackles=float(column[22])/float(column[3])*90,
accurate_shots=float(column[23])/float(column[3])*90,
shots=float(column[24])/float(column[3])*90,
key_tackles=float(column[25])/float(column[3])*90,
win_heads=float(column[26])/float(column[3])*90,
goals=float(column[27])/float(column[3])*90,
crosses=float(column[28])/float(column[3])*90,
rating=float(column[29]),
club=column[31],
league=column[32],
rate=rate(column)
)
context = {}
return render(request, template, context)
@permission_required('admin.can_addlog_entry')
def player_delete(request):
Player.objects.all().delete()
return redirect('player_upload')
@permission_required('admin.can_addlog_entry')
def player_club_delete(request, club):
Player.objects.filter(club=club).delete()
return redirect('players')
@permission_required('admin.can_addlog_entry')
def player_league_delete(request, league):
Player.objects.filter(league=league).delete()
return redirect('players')
|
normal
|
{
"blob_id": "bce794616889b80c152a8ebec8d02e49a96684e9",
"index": 2955,
"step-1": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\n<mask token>\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n",
"step-2": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\ndef rate(stats):\n sum = 0.0\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\n sum += float(stats[26]) / float(stats[3]) * 90 / 5\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\n return sum\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_upload(request):\n template = 'players/player_upload.html'\n prompt = {'order': ''}\n if request.method == 'GET':\n return render(request, template, prompt)\n csv_file = request.FILES['file']\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'This is not a csv file')\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\n for i, stat in enumerate(column):\n if i in [0, 1]:\n continue\n column[i] = column[i].replace('Åš', 'Ś')\n column[i] = column[i].replace(',', '.')\n column[i] = column[i].replace('km', '')\n column[i] = column[i].replace('Â\\xa0', '')\n column[i] = column[i].replace('-', '0')\n if int(column[3]) < 180:\n continue\n if column[32] == '0':\n continue\n if not League.objects.filter(name=column[32]):\n League.objects.update_or_create(name=column[32])\n if not Team.objects.filter(name=column[31]):\n Team.objects.update_or_create(league_id=League.objects.filter(\n name=column[32])[0].id, name=column[31])\n _, created = Player.objects.update_or_create(team_id=2, name=column\n [0], age=column[2], position=column[1], minutes=column[3],\n accurate_passes=float(column[4]) / float(column[3]) * 90,\n passes=float(column[5]) / float(column[3]) * 90,\n created_situations=float(column[6]) / float(column[3]) * 90,\n key_passes=float(column[7]) / float(column[3]) * 90, dribble=\n float(column[8]) / float(column[3]) * 90, fouls_on=float(column\n [9]) / float(column[3]) * 90, offsides=float(column[10]) /\n float(column[3]) * 90, mistakes=float(column[11]) / float(\n column[3]) * 90, culpable_goals=float(column[12]) / float(\n column[3]) * 90, accurate_cross=float(column[13]) / float(\n column[3]) * 90, assists=float(column[14]) / float(column[3]) *\n 90, heads=float(column[15]) / float(column[3]) * 90, 
tackles=\n float(column[16]) / float(column[3]) * 90, key_heads=float(\n column[17]) / float(column[3]) * 90, interceptions=float(column\n [18]) / float(column[3]) * 90, catch_saves=float(column[19]) /\n float(column[3]) * 90, saves=float(column[20]) / float(column[3\n ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) *\n 90, complete_tackles=float(column[22]) / float(column[3]) * 90,\n accurate_shots=float(column[23]) / float(column[3]) * 90, shots\n =float(column[24]) / float(column[3]) * 90, key_tackles=float(\n column[25]) / float(column[3]) * 90, win_heads=float(column[26]\n ) / float(column[3]) * 90, goals=float(column[27]) / float(\n column[3]) * 90, crosses=float(column[28]) / float(column[3]) *\n 90, rating=float(column[29]), club=column[31], league=column[32\n ], rate=rate(column))\n context = {}\n return render(request, template, context)\n\n\n<mask token>\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_club_delete(request, club):\n Player.objects.filter(club=club).delete()\n return redirect('players')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n",
"step-3": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\ndef rate(stats):\n sum = 0.0\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\n sum += float(stats[26]) / float(stats[3]) * 90 / 5\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\n return sum\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_upload(request):\n template = 'players/player_upload.html'\n prompt = {'order': ''}\n if request.method == 'GET':\n return render(request, template, prompt)\n csv_file = request.FILES['file']\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'This is not a csv file')\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\n for i, stat in enumerate(column):\n if i in [0, 1]:\n continue\n column[i] = column[i].replace('Åš', 'Ś')\n column[i] = column[i].replace(',', '.')\n column[i] = column[i].replace('km', '')\n column[i] = column[i].replace('Â\\xa0', '')\n column[i] = column[i].replace('-', '0')\n if int(column[3]) < 180:\n continue\n if column[32] == '0':\n continue\n if not League.objects.filter(name=column[32]):\n League.objects.update_or_create(name=column[32])\n if not Team.objects.filter(name=column[31]):\n Team.objects.update_or_create(league_id=League.objects.filter(\n name=column[32])[0].id, name=column[31])\n _, created = Player.objects.update_or_create(team_id=2, name=column\n [0], age=column[2], position=column[1], minutes=column[3],\n accurate_passes=float(column[4]) / float(column[3]) * 90,\n passes=float(column[5]) / float(column[3]) * 90,\n created_situations=float(column[6]) / float(column[3]) * 90,\n key_passes=float(column[7]) / float(column[3]) * 90, dribble=\n float(column[8]) / float(column[3]) * 90, fouls_on=float(column\n [9]) / float(column[3]) * 90, offsides=float(column[10]) /\n float(column[3]) * 90, mistakes=float(column[11]) / float(\n column[3]) * 90, culpable_goals=float(column[12]) / float(\n column[3]) * 90, accurate_cross=float(column[13]) / float(\n column[3]) * 90, assists=float(column[14]) / float(column[3]) *\n 90, heads=float(column[15]) / float(column[3]) * 90, 
tackles=\n float(column[16]) / float(column[3]) * 90, key_heads=float(\n column[17]) / float(column[3]) * 90, interceptions=float(column\n [18]) / float(column[3]) * 90, catch_saves=float(column[19]) /\n float(column[3]) * 90, saves=float(column[20]) / float(column[3\n ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) *\n 90, complete_tackles=float(column[22]) / float(column[3]) * 90,\n accurate_shots=float(column[23]) / float(column[3]) * 90, shots\n =float(column[24]) / float(column[3]) * 90, key_tackles=float(\n column[25]) / float(column[3]) * 90, win_heads=float(column[26]\n ) / float(column[3]) * 90, goals=float(column[27]) / float(\n column[3]) * 90, crosses=float(column[28]) / float(column[3]) *\n 90, rating=float(column[29]), club=column[31], league=column[32\n ], rate=rate(column))\n context = {}\n return render(request, template, context)\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_delete(request):\n Player.objects.all().delete()\n return redirect('player_upload')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_club_delete(request, club):\n Player.objects.filter(club=club).delete()\n return redirect('players')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n",
"step-4": "import csv, io\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.views import generic\nfrom itertools import chain\nfrom .models import Player, League, Team\n\n\nclass IndexView(generic.ListView):\n template_name = 'players/players.html'\n context_object_name = 'players'\n\n def get_queryset(self):\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\n\n\ndef rate(stats):\n sum = 0.0\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\n sum += float(stats[26]) / float(stats[3]) * 90 / 5\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\n return sum\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_upload(request):\n template = 'players/player_upload.html'\n prompt = {'order': ''}\n if request.method == 'GET':\n return render(request, template, prompt)\n csv_file = request.FILES['file']\n if not csv_file.name.endswith('.csv'):\n messages.error(request, 'This is not a csv file')\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\n for i, stat in enumerate(column):\n if i in [0, 1]:\n continue\n column[i] = column[i].replace('Åš', 'Ś')\n column[i] = column[i].replace(',', '.')\n column[i] = column[i].replace('km', '')\n column[i] = column[i].replace('Â\\xa0', '')\n column[i] = column[i].replace('-', '0')\n if int(column[3]) < 180:\n continue\n if column[32] == '0':\n continue\n if not League.objects.filter(name=column[32]):\n League.objects.update_or_create(name=column[32])\n if not Team.objects.filter(name=column[31]):\n Team.objects.update_or_create(league_id=League.objects.filter(\n name=column[32])[0].id, name=column[31])\n _, created = Player.objects.update_or_create(team_id=2, name=column\n [0], age=column[2], position=column[1], minutes=column[3],\n accurate_passes=float(column[4]) / float(column[3]) * 90,\n passes=float(column[5]) / float(column[3]) * 90,\n created_situations=float(column[6]) / float(column[3]) * 90,\n key_passes=float(column[7]) / float(column[3]) * 90, dribble=\n float(column[8]) / float(column[3]) * 90, fouls_on=float(column\n [9]) / float(column[3]) * 90, offsides=float(column[10]) /\n float(column[3]) * 90, mistakes=float(column[11]) / 
float(\n column[3]) * 90, culpable_goals=float(column[12]) / float(\n column[3]) * 90, accurate_cross=float(column[13]) / float(\n column[3]) * 90, assists=float(column[14]) / float(column[3]) *\n 90, heads=float(column[15]) / float(column[3]) * 90, tackles=\n float(column[16]) / float(column[3]) * 90, key_heads=float(\n column[17]) / float(column[3]) * 90, interceptions=float(column\n [18]) / float(column[3]) * 90, catch_saves=float(column[19]) /\n float(column[3]) * 90, saves=float(column[20]) / float(column[3\n ]) * 90, saves_on_corner=float(column[21]) / float(column[3]) *\n 90, complete_tackles=float(column[22]) / float(column[3]) * 90,\n accurate_shots=float(column[23]) / float(column[3]) * 90, shots\n =float(column[24]) / float(column[3]) * 90, key_tackles=float(\n column[25]) / float(column[3]) * 90, win_heads=float(column[26]\n ) / float(column[3]) * 90, goals=float(column[27]) / float(\n column[3]) * 90, crosses=float(column[28]) / float(column[3]) *\n 90, rating=float(column[29]), club=column[31], league=column[32\n ], rate=rate(column))\n context = {}\n return render(request, template, context)\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_delete(request):\n Player.objects.all().delete()\n return redirect('player_upload')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_club_delete(request, club):\n Player.objects.filter(club=club).delete()\n return redirect('players')\n\n\n@permission_required('admin.can_addlog_entry')\ndef player_league_delete(request, league):\n Player.objects.filter(league=league).delete()\n return redirect('players')\n",
"step-5": "import csv, io\r\nfrom django.shortcuts import render, redirect\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.decorators import permission_required\r\nfrom django.views import generic\r\nfrom itertools import chain\r\n\r\nfrom .models import Player, League, Team\r\n\r\n\r\nclass IndexView(generic.ListView):\r\n template_name = 'players/players.html'\r\n context_object_name = 'players'\r\n\r\n def get_queryset(self):\r\n return list(chain(Player.objects.all(), Player._meta.get_fields()))\r\n\r\n\r\ndef rate(stats):\r\n sum = 0.\r\n\r\n sum += float(stats[4]) / float(stats[3]) * 90 / 30\r\n sum += float(stats[5]) / float(stats[3]) * 90 / 40\r\n sum += float(stats[6]) / float(stats[3]) * 90 / 2\r\n sum += float(stats[7]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[8]) / float(stats[3]) * 90 / 3\r\n sum += float(stats[9]) / float(stats[3]) * 90 / 1.5\r\n sum += float(stats[10]) / float(stats[3]) * 90 / 5\r\n sum -= float(stats[11]) / float(stats[3]) * 90 / 1.2\r\n sum -= float(stats[12]) / float(stats[3]) * 90 / 0.5\r\n sum -= float(stats[13]) / float(stats[3]) * 90 / 1.5\r\n sum += float(stats[14]) / float(stats[3]) * 90 / 0.5\r\n sum += float(stats[15]) / float(stats[3]) * 90 / 11\r\n sum += float(stats[16]) / float(stats[3]) * 90 / 4\r\n sum += float(stats[17]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[18]) / float(stats[3]) * 90 / 2\r\n sum += float(stats[19]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[20]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[21]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[22]) / float(stats[3]) * 90 / 2.5\r\n sum += float(stats[23]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[24]) / float(stats[3]) * 90 / 2\r\n sum += float(stats[25]) / float(stats[3]) * 90 / 1\r\n sum += float(stats[26]) / float(stats[3]) * 90 / 5\r\n sum += float(stats[27]) / float(stats[3]) * 90 / 0.5\r\n sum += float(stats[28]) / float(stats[3]) * 90 / 10\r\n\r\n return sum\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_upload(request):\r\n template = 'players/player_upload.html'\r\n\r\n prompt = {\r\n 'order': ''\r\n }\r\n\r\n if request.method == \"GET\":\r\n return render(request, template, prompt)\r\n\r\n csv_file = request.FILES['file']\r\n\r\n if not csv_file.name.endswith('.csv'):\r\n messages.error(request, 'This is not a csv file')\r\n\r\n data_set = csv_file.read().decode('UTF-8')\r\n io_string = io.StringIO(data_set)\r\n next(io_string)\r\n for column in csv.reader(io_string, delimiter=':', quotechar='|'):\r\n for i, stat in enumerate(column):\r\n if i in [0, 1]:\r\n continue\r\n\r\n column[i] = column[i].replace('Åš', 'Ś')\r\n column[i] = column[i].replace(',', '.')\r\n column[i] = column[i].replace('km', '')\r\n column[i] = column[i].replace('Â\\xa0', '')\r\n column[i] = column[i].replace('-', '0')\r\n if int(column[3]) < 180:\r\n continue\r\n if column[32] == '0':\r\n continue\r\n if not League.objects.filter(name=column[32]):\r\n League.objects.update_or_create(\r\n name=column[32]\r\n )\r\n if not Team.objects.filter(name=column[31]):\r\n Team.objects.update_or_create(\r\n league_id=League.objects.filter(name=column[32])[0].id,\r\n name=column[31]\r\n )\r\n _, created = Player.objects.update_or_create(\r\n team_id=2,\r\n name=column[0],\r\n age=column[2],\r\n position=column[1],\r\n minutes=column[3],\r\n accurate_passes=float(column[4])/float(column[3])*90,\r\n passes=float(column[5])/float(column[3])*90,\r\n created_situations=float(column[6])/float(column[3])*90,\r\n 
key_passes=float(column[7])/float(column[3])*90,\r\n dribble=float(column[8])/float(column[3])*90,\r\n fouls_on=float(column[9])/float(column[3])*90,\r\n offsides=float(column[10])/float(column[3])*90,\r\n mistakes=float(column[11])/float(column[3])*90,\r\n culpable_goals=float(column[12])/float(column[3])*90,\r\n accurate_cross=float(column[13])/float(column[3])*90,\r\n assists=float(column[14])/float(column[3])*90,\r\n heads=float(column[15])/float(column[3])*90,\r\n tackles=float(column[16])/float(column[3])*90,\r\n key_heads=float(column[17])/float(column[3])*90,\r\n interceptions=float(column[18])/float(column[3])*90,\r\n catch_saves=float(column[19])/float(column[3])*90,\r\n saves=float(column[20])/float(column[3])*90,\r\n saves_on_corner=float(column[21])/float(column[3])*90,\r\n complete_tackles=float(column[22])/float(column[3])*90,\r\n accurate_shots=float(column[23])/float(column[3])*90,\r\n shots=float(column[24])/float(column[3])*90,\r\n key_tackles=float(column[25])/float(column[3])*90,\r\n win_heads=float(column[26])/float(column[3])*90,\r\n goals=float(column[27])/float(column[3])*90,\r\n crosses=float(column[28])/float(column[3])*90,\r\n rating=float(column[29]),\r\n club=column[31],\r\n league=column[32],\r\n rate=rate(column)\r\n )\r\n\r\n context = {}\r\n return render(request, template, context)\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_delete(request):\r\n Player.objects.all().delete()\r\n return redirect('player_upload')\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_club_delete(request, club):\r\n Player.objects.filter(club=club).delete()\r\n return redirect('players')\r\n\r\n\r\n@permission_required('admin.can_addlog_entry')\r\ndef player_league_delete(request, league):\r\n Player.objects.filter(league=league).delete()\r\n return redirect('players')\r\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
#Max Low
#9-25-17
#quiz2.py -- compare two numbers, check divisibility by 3, and verify their product
numone = int(input('Enter a number: '))
numtwo = int(input('Enter a 2nd number: '))
if numone > numtwo:
print('The first number is bigger')
elif numtwo > numone:
print('The second number is bigger')
else:
print('The numbers are the same')
if numone % 3 == 0 and numtwo % 3 == 0:
print('They are both divisible by 3')
elif numone % 3 == 0:
print('Only the first number is divisible by three')
elif numtwo % 3 == 0:
print('Only the second number is divisible by three')
else:
print('Neither number is divisible by 3')
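# Finally, check the user's arithmetic on the product of the two numbers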
product = int(input('What is the product of your two numbers?: '))
if product == numone*numtwo:
print('correct')
else:
print('incorrect')
|
normal
|
{
"blob_id": "a67612e8301728d1fb366d7c8909fa830f04bf45",
"index": 9739,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif numone > numtwo:\n print('The first number is bigger')\nelif numtwo > numone:\n print('The second number is bigger')\nelse:\n print('The numbers are the same')\nif numone % 3 == 0 and numtwo % 3 == 0:\n print('They are both divisible by 3')\nelif numone % 3 == 0:\n print('Only the first number is divisible by three')\nelif numtwo % 3 == 0:\n print('Only the second number is divisible by three')\nelse:\n print('Neither number is divisible by 3')\n<mask token>\nif product == numone * numtwo:\n print('correct')\nelse:\n print('incorrect')\n",
"step-3": "numone = int(input('Enter a number: '))\nnumtwo = int(input('Enter a 2nd number: '))\nif numone > numtwo:\n print('The first number is bigger')\nelif numtwo > numone:\n print('The second number is bigger')\nelse:\n print('The numbers are the same')\nif numone % 3 == 0 and numtwo % 3 == 0:\n print('They are both divisible by 3')\nelif numone % 3 == 0:\n print('Only the first number is divisible by three')\nelif numtwo % 3 == 0:\n print('Only the second number is divisible by three')\nelse:\n print('Neither number is divisible by 3')\nproduct = int(input('What is the product of your two numbers?: '))\nif product == numone * numtwo:\n print('correct')\nelse:\n print('incorrect')\n",
"step-4": "#Max Low\n#9-25-17\n#quiz2.py -- numbers , bigger smaller same, divisible by 3, product and correct person\n\nnumone = int(input('Enter a number: '))\nnumtwo = int(input('Enter a 2nd number: '))\n\nif numone > numtwo:\n print('The first number is bigger')\nelif numtwo > numone:\n print('The second number is bigger')\nelse:\n print('The numbers are the same')\n\n \nif numone % 3 == 0 and numtwo % 3 == 0:\n print('They are both divisible by 3')\nelif numone % 3 == 0:\n print('Only the first number is divisible by three')\nelif numtwo % 3 == 0:\n print('Only the second number is divisible by three')\nelse:\n print('Neither number is divisible by 3')\n\nproduct = int(input('What is the product of your two numbers?: '))\nif product == numone*numtwo:\n print('correct')\nelse:\n print('incorrect')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('file.txt', 'w') as file:
content = file.write('Tanyuhich')
try:
browser = webdriver.Chrome()
browser.get('http://suninjuly.github.io/file_input.html')
input1 = browser.find_element_by_name('firstname')
input1.send_keys('Ivan')
input2 = browser.find_element_by_name('lastname')
input2.send_keys('Petrov')
input3 = browser.find_element_by_name('email')
input3.send_keys('[email protected]')
current_dir = os.path.abspath(os.path.dirname(__file__))
path = os.getcwd() + '/' + file.name
element = browser.find_element(By.CSS_SELECTOR, "[type='file']")
element.send_keys(path)
button = browser.find_element_by_css_selector('button.btn')
button.click()
finally:
time.sleep(30)
browser.quit()
<|reserved_special_token_1|>
import time
from selenium import webdriver
import os
from selenium.webdriver.common.by import By
with open('file.txt', 'w') as file:
content = file.write('Tanyuhich')
try:
browser = webdriver.Chrome()
browser.get('http://suninjuly.github.io/file_input.html')
input1 = browser.find_element_by_name('firstname')
input1.send_keys('Ivan')
input2 = browser.find_element_by_name('lastname')
input2.send_keys('Petrov')
input3 = browser.find_element_by_name('email')
input3.send_keys('[email protected]')
current_dir = os.path.abspath(os.path.dirname(__file__))
path = os.getcwd() + '/' + file.name
element = browser.find_element(By.CSS_SELECTOR, "[type='file']")
element.send_keys(path)
button = browser.find_element_by_css_selector('button.btn')
button.click()
finally:
time.sleep(30)
browser.quit()
<|reserved_special_token_1|>
import time
from selenium import webdriver
import os
from selenium.webdriver.common.by import By
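# Create the text file that the form will upload.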
with open("file.txt", "w") as file:
content = file.write("Tanyuhich")
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/file_input.html")
input1 = browser.find_element_by_name('firstname')
input1.send_keys("Ivan")
input2 = browser.find_element_by_name('lastname')
input2.send_keys("Petrov")
input3 = browser.find_element_by_name('email')
input3.send_keys("[email protected]")
current_dir = os.path.abspath(os.path.dirname(__file__))
path = os.getcwd() + '/' + file.name
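    # Sending an absolute path to the file input performs the upload.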
element = browser.find_element(By.CSS_SELECTOR, "[type='file']")
element.send_keys(path)
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
    # wait 30 seconds so the verification code can be copied
    time.sleep(30)
    # close the browser after all interactions
    browser.quit()
# remember to leave a blank line at the end of the file
|
flexible
|
{
"blob_id": "03270285c6dc99d8dcb9804270421f36b573048c",
"index": 2863,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('file.txt', 'w') as file:\n content = file.write('Tanyuhich')\ntry:\n browser = webdriver.Chrome()\n browser.get('http://suninjuly.github.io/file_input.html')\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys('Ivan')\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys('Petrov')\n input3 = browser.find_element_by_name('email')\n input3.send_keys('[email protected]')\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\nfinally:\n time.sleep(30)\n browser.quit()\n",
"step-3": "import time\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.by import By\nwith open('file.txt', 'w') as file:\n content = file.write('Tanyuhich')\ntry:\n browser = webdriver.Chrome()\n browser.get('http://suninjuly.github.io/file_input.html')\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys('Ivan')\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys('Petrov')\n input3 = browser.find_element_by_name('email')\n input3.send_keys('[email protected]')\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\nfinally:\n time.sleep(30)\n browser.quit()\n",
"step-4": "import time\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.by import By\n\nwith open(\"file.txt\", \"w\") as file:\n content = file.write(\"Tanyuhich\")\n \ntry:\n browser = webdriver.Chrome()\n browser.get(\"http://suninjuly.github.io/file_input.html\")\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys(\"Ivan\")\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys(\"Petrov\")\n input3 = browser.find_element_by_name('email')\n input3.send_keys(\"[email protected]\")\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector(\"button.btn\")\n button.click()\n\nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(30)\n # закрываем браузер после всех манипуляций\n browser.quit()\n\n# не забываем оставить пустую строку в конце файла",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
This module provides a script to extract the data from all JSON files stored in a specific directory and create an HTML
table for a better overview of the data.
.. moduleauthor:: Maximilian Springenberg <[email protected]>
|
"""
from collections import defaultdict
from argparse import ArgumentParser
import os
import sys
import json
import pandas as pd
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))
sys.path.append(SRC_DIR)
sys.path.append(FILE_DIR)
from src.util import sanity_util
def jsons_to_table(dir_jsons, dir_out, name, format='html'):
"""
    Extracts the information stored in the JSON files and creates an HTML table for them.
    :param dir_jsons: directory of JSON files
    :param dir_out: output directory of the HTML table
    :param name: name of the HTML page
    :param format: output format, either 'html' (default) or 'latex'
"""
# sanity of paths
dir_out = sanity_util.safe_dir_path(dir_path=dir_out)
file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format))
# reading JSON files
p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)])
table = defaultdict(list)
keys = set()
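    # First pass: collect the union of keys across all JSON files.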
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
keys.add(k)
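    # Second pass: build the columns, padding keys a file lacks with None.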
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
table[k].append(el[k])
for k in keys.difference(set(el.keys())):
table[k].append(None)
# DataFrame conversion
df = pd.DataFrame.from_dict(table)
    # writing the table; the styling script only applies to HTML output
    if format == 'html':
        table_str = df.to_html()
        table_str += '<script type="text/javascript" src="stylize.js"></script>'
        stylize_js = js_stylize()
        with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:
            f_js.write(stylize_js)
    else:
        table_str = df.to_latex()
with open(file_name, 'w') as f_out:
f_out.write(table_str)
def js_stylize():
return '''
/**
* small script to stylize raw html tables
* @author Maximilian Springenberg <[email protected]>
*/
/**
     * adding all bootstrap relevant dependencies to the header
*/
function add_bootsrap(){
document.head.innerHTML +=
"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n" +
"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n" +
"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n" +
"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>";
}
/**
* setting classnames of a specific tag
*/
function style_tag(tagName, className){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].className = className;
}
}
/**
* setting the (Bootstrap) contenteditable flag for a specific tag
*/
function editable_tag(tagName, editable){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].setAttribute('contenteditable', editable);
}
}
// setting title
document.title = 'PHOCNet Table';
// adding bootstrap
add_bootsrap();
// stylize tables
style_tag('table', 'table table-responsive-md');
style_tag('thead', 'thead-dark');
// enable editable table-divisions
editable_tag('td', 'true');
'''
def parser():
"""
    Creates the argument parser for this script.

    :return: argument parser with the following arguments
Positional:
=============== ======================================================
arg semantic
=============== ======================================================
dir_jsons directory of JSON files
    dir_out         the directory to save the HTML page to
file_name name of the HTML file
=============== ======================================================
"""
parser = ArgumentParser()
parser.add_argument('dir_jsons', help='dir containing json files')
parser.add_argument('dir_out', help='output directory')
parser.add_argument('file_name', help='name of HTML file')
return parser
if __name__ == '__main__':
arg_parser = parser()
args = vars(arg_parser.parse_args())
    jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'], name=args['file_name'], format='html')
|
normal
|
{
"blob_id": "d6e836140b1f9c955711402111dc07e74b4a23b1",
"index": 1621,
"step-1": "<mask token>\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n 
parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\n<mask token>\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json 
files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],\n name=args['name'], format='html')\n",
"step-3": "<mask token>\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\n<mask token>\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== 
======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],\n name=args['name'], format='html')\n",
"step-4": "<mask token>\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\nimport os\nimport sys\nimport json\nimport pandas as pd\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\nfrom src.util import sanity_util\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=\n '.{}'.format(format))\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.\n listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n df = pd.DataFrame.from_dict(table)\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return \"\"\"\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n\" +\n \"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n\" +\n \"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n\" +\n \"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n editable_tag('td', 'true'); \n \"\"\"\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory 
of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],\n name=args['name'], format='html')\n",
"step-5": "\"\"\"\nThis module provides a script to extract data from all JSON files stored in a specific directory and create a HTML\ntable for an better overview of the data.\n\n.. moduleauthor:: Maximilian Springenberg <[email protected]>\n\n|\n\n\"\"\"\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\n\nimport os\nimport sys\nimport json\nimport pandas as pd\n\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))\nsys.path.append(SRC_DIR)\nsys.path.append(FILE_DIR)\nfrom src.util import sanity_util\n\n\ndef jsons_to_table(dir_jsons, dir_out, name, format='html'):\n \"\"\"\n Extracts the informations stored in the JSON files and stores creates an HTML-table for them.\n\n :param dir_jsons: directory of JSON files\n :param dir_out: output directory of the HTML-table\n :param name: name of the HTML page\n \"\"\"\n # sanity of paths\n dir_out = sanity_util.safe_dir_path(dir_path=dir_out)\n file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format))\n # reading JSON files\n p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)])\n table = defaultdict(list)\n keys = set()\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n keys.add(k)\n for p_f in p_files:\n if p_f.lower().endswith('.json'):\n with open(p_f, 'r') as f_json:\n el = json.load(f_json)\n for k in el.keys():\n table[k].append(el[k])\n for k in keys.difference(set(el.keys())):\n table[k].append(None)\n # DataFrame conversion\n df = pd.DataFrame.from_dict(table)\n # writing HTML table\n if format == 'html':\n table_str = df.to_html()\n else:\n table_str = df.to_latex()\n table_str += '<script type=\"text/javascript\" src=\"stylize.js\"></script>'\n stylize_js = js_stylize()\n with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:\n f_js.write(stylize_js)\n with open(file_name, 'w') as f_out:\n f_out.write(table_str)\n\n\ndef js_stylize():\n return '''\n /**\n * small script to stylize raw html tables\n * @author Maximilian Springenberg <[email protected]>\n */\n \n \n /**\n * adding all bootstrap relevent dependencies to the headder\n */\n function add_bootsrap(){\n document.head.innerHTML +=\n \"<link rel=\\\"stylesheet\\\" href=\\\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\\\">\\n\" +\n \"<script src=\\\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\\\"></script>\\n\" +\n \"<script src=\\\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\\\"></script>\\n\" +\n \"<script src=\\\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\\\"></script>\";\n }\n \n \n /**\n * setting classnames of a specific tag\n */\n function style_tag(tagName, className){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].className = className;\n }\n }\n \n \n /**\n * setting the (Bootstrap) contenteditable flag for a specific tag\n */\n function editable_tag(tagName, editable){\n tags = document.getElementsByTagName(tagName);\n for(let i=0; i<tags.length; ++i){\n tags[i].setAttribute('contenteditable', editable);\n }\n }\n \n \n // setting title\n document.title = 'PHOCNet Table';\n // adding bootstrap\n add_bootsrap();\n // stylize tables\n style_tag('table', 'table table-responsive-md');\n style_tag('thead', 'thead-dark');\n // enable editable table-divisions\n 
editable_tag('td', 'true'); \n '''\n\n\ndef parser():\n \"\"\"\n Creates a parser of this script.\n\n :return: args-parser with the following arguments\n\n\n Positional:\n\n =============== ======================================================\n arg semantic\n =============== ======================================================\n dir_jsons directory of JSON files\n dir_out the directory to safe the HTML page to\n file_name name of the HTML file\n =============== ======================================================\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser\n\n\nif __name__ == '__main__':\n arg_parser = parser()\n args = vars(arg_parser.parse_args())\n jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'], name=args['name'], format='html')\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.shortcuts import render
from .models import Votings
from .serializers import VotingsSerializer
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
import requests, json
@api_view(['GET'])
def votings(request):
votings = Votings.objects.all()
    if not votings:
        return Response({}, status=status.HTTP_404_NOT_FOUND)
    else:
        serializer = VotingsSerializer(votings, many=True)  # local serialization, available as serializer.data
        # Answer with the published voting data fetched from the remote bin.
        r = requests.get('https://api.myjson.com/bins/17w6e1')
        data = json.loads(r.text)
        return Response(data, status=status.HTTP_200_OK)
|
normal
|
{
"blob_id": "c3ecac1c0facbf6f0905bb03fd337a7f4f5bbeff",
"index": 4376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['GET'])\ndef votings(request):\n votings = Votings.objects.all()\n if votings:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = VotingsSerializer(votings)\n r = requests.get('https://api.myjson.com/bins/17w6e1', serializer)\n data = json.loads(r.text)\n return Response(data, status=status.HTTP_201_CREATED)\n",
"step-3": "from django.shortcuts import render\nfrom .models import Votings\nfrom .serializers import VotingsSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nimport requests, json\n\n\n@api_view(['GET'])\ndef votings(request):\n votings = Votings.objects.all()\n if votings:\n return Response({}, status=status.HTTP_404_NOT_FOUND)\n else:\n serializer = VotingsSerializer(votings)\n r = requests.get('https://api.myjson.com/bins/17w6e1', serializer)\n data = json.loads(r.text)\n return Response(data, status=status.HTTP_201_CREATED)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class ReportingDealer(game_dealer.GameDealer):
<|reserved_special_token_0|>
def Report(self):
"""For testing."""
return [p.hand for p in self.players]
class TestPlayCards(unittest.TestCase):
def testSmall(self):
little = ReportingDealer(1, 1).Report()
self.assertEqual(len(little), 1)
self.assertEqual(len(little[0]), 1)
self.assertTrue(little[0][0] in WHOLE_DECK)
def testZilch(self):
self.assertEqual([], ReportingDealer(0, 1).Report())
self.assertEqual([[]], ReportingDealer(1, 0).Report())
self.assertEqual([], ReportingDealer(0, 0).Report())
def testWholeDealer(self):
all_hands = ReportingDealer(9, 6).Report()
for hand in all_hands:
self.assertEqual(len(hand), 6)
self.assertEqual(len(all_hands), 9)
all_hands_collapsed = sorted(reduce(lambda x, y: x + y, all_hands))
self.assertEqual(all_hands_collapsed, WHOLE_DECK)
def testTooMany(self):
too_many = ReportingDealer(11, 5).Report()
too_many_collapsed = reduce(lambda x, y: x + y, too_many)
self.assertTrue('Sorry' in too_many_collapsed)
too_many_collapsed.remove('Sorry')
too_many_collapsed.sort()
self.assertEqual(too_many_collapsed, WHOLE_DECK)
def testWayTooMany(self):
way_too_many = ReportingDealer(11, 6).Report()
way_too_many_collapsed = reduce(lambda x, y: x + y, way_too_many)
self.assertEqual(len(way_too_many_collapsed), 66)
self.assertEqual(way_too_many_collapsed.count('Sorry'), 12)
for i in range(12):
way_too_many_collapsed.remove('Sorry')
way_too_many_collapsed.sort()
self.assertEqual(way_too_many_collapsed, WHOLE_DECK)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))
<|reserved_special_token_0|>
class ReportingDealer(game_dealer.GameDealer):
"""GameDealer only had methods that output strings,
so here we provide a list version for testing.
"""
def Report(self):
"""For testing."""
return [p.hand for p in self.players]
#!/usr/bin/env python3
"""(Optional) Test for GameDealer class."""
import unittest
import os, sys
from functools import reduce
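# Make the labs' parent directory importable before loading the module under test.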
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], ".."))
import Lab19_Extending_Builtins.lab19_3 as game_dealer
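# Sorted reference copy of the full deck; testWholeDealer below implies it holds exactly 54 cards.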
WHOLE_DECK = sorted(game_dealer.Deck())
class ReportingDealer(game_dealer.GameDealer):
"""GameDealer only had methods that output strings,
so here we provide a list version for testing.
"""
def Report(self):
"""For testing."""
return [p.hand for p in self.players]
class TestPlayCards(unittest.TestCase):
def testSmall(self):
little = ReportingDealer(1, 1).Report()
self.assertEqual(len(little), 1)
self.assertEqual(len(little[0]), 1)
self.assertTrue(little[0][0] in WHOLE_DECK)
def testZilch(self):
self.assertEqual([], ReportingDealer(0, 1).Report())
self.assertEqual([[]], ReportingDealer(1, 0).Report())
self.assertEqual([], ReportingDealer(0, 0).Report())
def testWholeDealer(self):
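        # 9 players x 6 cards = 54: this deals out the entire deck exactly, with no "Sorry" fillers.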
all_hands = ReportingDealer(9, 6).Report()
for hand in all_hands:
self.assertEqual(len(hand), 6)
self.assertEqual(len(all_hands), 9)
        all_hands_collapsed = sorted(reduce(lambda x, y: x + y, all_hands))
self.assertEqual(all_hands_collapsed, WHOLE_DECK)
def testTooMany(self):
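        # 11 players x 5 cards = 55 slots against the 54-card deck, so exactly one "Sorry" is dealt.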
too_many = ReportingDealer(11, 5).Report()
        too_many_collapsed = reduce(lambda x, y: x + y, too_many)
self.assertTrue("Sorry" in too_many_collapsed)
too_many_collapsed.remove("Sorry")
too_many_collapsed.sort()
self.assertEqual(too_many_collapsed, WHOLE_DECK)
def testWayTooMany(self):
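        # 11 players x 6 cards = 66 slots: the 54 real cards plus 12 "Sorry" placeholders.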
way_too_many = ReportingDealer(11, 6).Report()
        way_too_many_collapsed = reduce(lambda x, y: x + y, way_too_many)
        self.assertEqual(len(way_too_many_collapsed), 66)
        self.assertEqual(way_too_many_collapsed.count("Sorry"), 12)
for i in range(12):
way_too_many_collapsed.remove("Sorry")
way_too_many_collapsed.sort()
self.assertEqual(way_too_many_collapsed, WHOLE_DECK)
if __name__ == "__main__":
unittest.main()
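# ----------------------------------------------------------------------
# For reference, a minimal sketch of the module these tests exercise.
# The real Lab19_Extending_Builtins/lab19_3.py is not reproduced here;
# everything below is inferred from the assertions above (Deck() yields
# 54 cards, hands are dealt round-robin, and "Sorry" placeholders fill
# in once the deck is exhausted). The Player class and all internal
# names are illustrative assumptions, not the lab's actual code.
# ----------------------------------------------------------------------
import random

def Deck():
    """Return a fresh 54-card deck: 52 standard cards plus two jokers."""
    ranks = [str(n) for n in range(2, 11)] + ["Jack", "Queen", "King", "Ace"]
    suits = ["Clubs", "Diamonds", "Hearts", "Spades"]
    cards = ["%s of %s" % (rank, suit) for suit in suits for rank in ranks]
    return cards + ["Red Joker", "Black Joker"]

class Player:
    """Holds one dealt hand; the tests read it via the .hand attribute."""
    def __init__(self):
        self.hand = []

class GameDealer:
    """Deals cards_each cards to each of num_players, one card at a time."""
    def __init__(self, num_players, cards_each):
        deck = Deck()
        random.shuffle(deck)
        self.players = [Player() for _ in range(num_players)]
        for _ in range(cards_each):
            for player in self.players:
                # Apologize instead of dealing once the deck runs dry.
                player.hand.append(deck.pop() if deck else "Sorry")

# With a module like this saved as Lab19_Extending_Builtins/lab19_3.py,
# the suite above passes when the test file is run directly with python3.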