code (string, lengths 13–6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1–5)
---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Handlers:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Handlers:
change_store = '/change_store'
change_status = '/change_status'
mail = '/mail'
get_status = '/get_status'
create_order = '/create_order'
ask_store = '/ask_store'
check = '/check'
test = '/test'
<|reserved_special_token_1|>
class Handlers():
change_store = "/change_store"
change_status = "/change_status"
mail = "/mail"
get_status = "/get_status"
create_order = "/create_order"
ask_store = "/ask_store"
check = "/check"
test = "/test"
| flexible |
{
"blob_id": "32e3eed2e279706bca2925d3d9d897a928243b4c",
"index": 4518,
"step-1": "<mask token>\n",
"step-2": "class Handlers:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Handlers:\n change_store = '/change_store'\n change_status = '/change_status'\n mail = '/mail'\n get_status = '/get_status'\n create_order = '/create_order'\n ask_store = '/ask_store'\n check = '/check'\n test = '/test'\n",
"step-4": "class Handlers():\n change_store = \"/change_store\"\n change_status = \"/change_status\"\n mail = \"/mail\"\n get_status = \"/get_status\"\n create_order = \"/create_order\"\n ask_store = \"/ask_store\"\n check = \"/check\"\n test = \"/test\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
| [0, 1, 2, 3] |
<|reserved_special_token_0|>
def _from_meminfo_ptr(ptr, dicttype):
d = TypedDict(meminfo=ptr, dcttype=dicttype)
return d
class TypedDict(MutableMapping):
"""A typed-dictionary usable in Numba compiled functions.
Implements the MutableMapping interface.
"""
@classmethod
def empty(cls, key_type, value_type):
"""Create a new empty TypedDict with *key_type* and *value_type*
as the types for the keys and values of the dictionary respectively.
"""
return cls(dcttype=DictType(key_type, value_type))
def __init__(self, **kwargs):
"""
Parameters
----------
dcttype : numba.types.DictType; keyword-only
The dictionary type
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
"""
self._dict_type, self._opaque = self._parse_arg(**kwargs)
def _parse_arg(self, dcttype, meminfo=None):
if not isinstance(dcttype, DictType):
raise TypeError('*dcttype* must be a DictType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_dict(dcttype.key_type, dcttype.value_type)
return dcttype, opaque
@property
def _numba_type_(self):
return self._dict_type
def __getitem__(self, key):
return _getitem(self, key)
def __setitem__(self, key, value):
return _setitem(self, key, value)
def __delitem__(self, key):
_delitem(self, key)
def __iter__(self):
return iter(_iter(self))
def __len__(self):
return _length(self)
def __contains__(self, key):
return _contains(self, key)
def __str__(self):
buf = []
for k, v in self.items():
buf.append('{}: {}'.format(k, v))
return '{{{0}}}'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._dict_type)
return '{prefix}({body})'.format(prefix=prefix, body=body)
def get(self, key, default=None):
return _get(self, key, default)
def setdefault(self, key, default=None):
return _setdefault(self, key, default)
def copy(self):
return _copy(self)
<|reserved_special_token_0|>
@box(types.DictType)
def box_dicttype(typ, val, c):
context = c.context
builder = c.builder
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder, value=val)
boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)
numba_name = c.context.insert_const_string(c.builder.module, 'numba')
numba_mod = c.pyapi.import_module_noblock(numba_name)
typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')
fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')
dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typeddict_mod)
c.pyapi.decref(numba_mod)
c.pyapi.decref(boxed_meminfo)
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@njit
def _make_dict(keyty, valty):
return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))
<|reserved_special_token_0|>
@njit
def _setitem(d, key, value):
d[key] = value
<|reserved_special_token_0|>
@njit
def _delitem(d, key):
del d[key]
<|reserved_special_token_0|>
@njit
def _get(d, key, default):
return d.get(key, default)
<|reserved_special_token_0|>
def _from_meminfo_ptr(ptr, dicttype):
d = TypedDict(meminfo=ptr, dcttype=dicttype)
return d
class TypedDict(MutableMapping):
"""A typed-dictionary usable in Numba compiled functions.
Implements the MutableMapping interface.
"""
@classmethod
def empty(cls, key_type, value_type):
"""Create a new empty TypedDict with *key_type* and *value_type*
as the types for the keys and values of the dictionary respectively.
"""
return cls(dcttype=DictType(key_type, value_type))
def __init__(self, **kwargs):
"""
Parameters
----------
dcttype : numba.types.DictType; keyword-only
The dictionary type
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
"""
self._dict_type, self._opaque = self._parse_arg(**kwargs)
def _parse_arg(self, dcttype, meminfo=None):
if not isinstance(dcttype, DictType):
raise TypeError('*dcttype* must be a DictType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_dict(dcttype.key_type, dcttype.value_type)
return dcttype, opaque
@property
def _numba_type_(self):
return self._dict_type
def __getitem__(self, key):
return _getitem(self, key)
def __setitem__(self, key, value):
return _setitem(self, key, value)
def __delitem__(self, key):
_delitem(self, key)
def __iter__(self):
return iter(_iter(self))
def __len__(self):
return _length(self)
def __contains__(self, key):
return _contains(self, key)
def __str__(self):
buf = []
for k, v in self.items():
buf.append('{}: {}'.format(k, v))
return '{{{0}}}'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._dict_type)
return '{prefix}({body})'.format(prefix=prefix, body=body)
def get(self, key, default=None):
return _get(self, key, default)
def setdefault(self, key, default=None):
return _setdefault(self, key, default)
def copy(self):
return _copy(self)
@overload_method(TypeRef, 'empty')
def typeddict_empty(cls, key_type, value_type):
if cls.instance_type is not DictType:
return
def impl(cls, key_type, value_type):
return dictobject.new_dict(key_type, value_type)
return impl
@box(types.DictType)
def box_dicttype(typ, val, c):
context = c.context
builder = c.builder
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder, value=val)
boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)
numba_name = c.context.insert_const_string(c.builder.module, 'numba')
numba_mod = c.pyapi.import_module_noblock(numba_name)
typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')
fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')
dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typeddict_mod)
c.pyapi.decref(numba_mod)
c.pyapi.decref(boxed_meminfo)
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@njit
def _make_dict(keyty, valty):
return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))
@njit
def _length(d):
return len(d)
@njit
def _setitem(d, key, value):
d[key] = value
@njit
def _getitem(d, key):
return d[key]
@njit
def _delitem(d, key):
del d[key]
<|reserved_special_token_0|>
@njit
def _get(d, key, default):
return d.get(key, default)
<|reserved_special_token_0|>
@njit
def _copy(d):
return d.copy()
def _from_meminfo_ptr(ptr, dicttype):
d = TypedDict(meminfo=ptr, dcttype=dicttype)
return d
class TypedDict(MutableMapping):
"""A typed-dictionary usable in Numba compiled functions.
Implements the MutableMapping interface.
"""
@classmethod
def empty(cls, key_type, value_type):
"""Create a new empty TypedDict with *key_type* and *value_type*
as the types for the keys and values of the dictionary respectively.
"""
return cls(dcttype=DictType(key_type, value_type))
def __init__(self, **kwargs):
"""
Parameters
----------
dcttype : numba.types.DictType; keyword-only
The dictionary type
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
"""
self._dict_type, self._opaque = self._parse_arg(**kwargs)
def _parse_arg(self, dcttype, meminfo=None):
if not isinstance(dcttype, DictType):
raise TypeError('*dcttype* must be a DictType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_dict(dcttype.key_type, dcttype.value_type)
return dcttype, opaque
@property
def _numba_type_(self):
return self._dict_type
def __getitem__(self, key):
return _getitem(self, key)
def __setitem__(self, key, value):
return _setitem(self, key, value)
def __delitem__(self, key):
_delitem(self, key)
def __iter__(self):
return iter(_iter(self))
def __len__(self):
return _length(self)
def __contains__(self, key):
return _contains(self, key)
def __str__(self):
buf = []
for k, v in self.items():
buf.append('{}: {}'.format(k, v))
return '{{{0}}}'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._dict_type)
return '{prefix}({body})'.format(prefix=prefix, body=body)
def get(self, key, default=None):
return _get(self, key, default)
def setdefault(self, key, default=None):
return _setdefault(self, key, default)
def copy(self):
return _copy(self)
@overload_method(TypeRef, 'empty')
def typeddict_empty(cls, key_type, value_type):
if cls.instance_type is not DictType:
return
def impl(cls, key_type, value_type):
return dictobject.new_dict(key_type, value_type)
return impl
@box(types.DictType)
def box_dicttype(typ, val, c):
context = c.context
builder = c.builder
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder, value=val)
boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)
numba_name = c.context.insert_const_string(c.builder.module, 'numba')
numba_mod = c.pyapi.import_module_noblock(numba_name)
typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')
fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')
dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typeddict_mod)
c.pyapi.decref(numba_mod)
c.pyapi.decref(boxed_meminfo)
return res
@unbox(types.DictType)
def unbox_dicttype(typ, val, c):
context = c.context
builder = c.builder
miptr = c.pyapi.object_getattr_string(val, '_opaque')
native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
mi = native.value
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder)
data_pointer = context.nrt.meminfo_data(builder, mi)
    data_pointer = builder.bitcast(data_pointer, dictobject.ll_dict_type.as_pointer())
dstruct.data = builder.load(data_pointer)
dstruct.meminfo = mi
dctobj = dstruct._getvalue()
c.pyapi.decref(miptr)
return NativeValue(dctobj)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@njit
def _make_dict(keyty, valty):
return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))
@njit
def _length(d):
return len(d)
@njit
def _setitem(d, key, value):
d[key] = value
@njit
def _getitem(d, key):
return d[key]
@njit
def _delitem(d, key):
del d[key]
<|reserved_special_token_0|>
@njit
def _get(d, key, default):
return d.get(key, default)
@njit
def _setdefault(d, key, default):
return d.setdefault(key, default)
<|reserved_special_token_0|>
@njit
def _copy(d):
return d.copy()
def _from_meminfo_ptr(ptr, dicttype):
d = TypedDict(meminfo=ptr, dcttype=dicttype)
return d
class TypedDict(MutableMapping):
"""A typed-dictionary usable in Numba compiled functions.
Implements the MutableMapping interface.
"""
@classmethod
def empty(cls, key_type, value_type):
"""Create a new empty TypedDict with *key_type* and *value_type*
as the types for the keys and values of the dictionary respectively.
"""
return cls(dcttype=DictType(key_type, value_type))
def __init__(self, **kwargs):
"""
Parameters
----------
dcttype : numba.types.DictType; keyword-only
The dictionary type
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
"""
self._dict_type, self._opaque = self._parse_arg(**kwargs)
def _parse_arg(self, dcttype, meminfo=None):
if not isinstance(dcttype, DictType):
raise TypeError('*dcttype* must be a DictType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_dict(dcttype.key_type, dcttype.value_type)
return dcttype, opaque
@property
def _numba_type_(self):
return self._dict_type
def __getitem__(self, key):
return _getitem(self, key)
def __setitem__(self, key, value):
return _setitem(self, key, value)
def __delitem__(self, key):
_delitem(self, key)
def __iter__(self):
return iter(_iter(self))
def __len__(self):
return _length(self)
def __contains__(self, key):
return _contains(self, key)
def __str__(self):
buf = []
for k, v in self.items():
buf.append('{}: {}'.format(k, v))
return '{{{0}}}'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._dict_type)
return '{prefix}({body})'.format(prefix=prefix, body=body)
def get(self, key, default=None):
return _get(self, key, default)
def setdefault(self, key, default=None):
return _setdefault(self, key, default)
def copy(self):
return _copy(self)
@overload_method(TypeRef, 'empty')
def typeddict_empty(cls, key_type, value_type):
if cls.instance_type is not DictType:
return
def impl(cls, key_type, value_type):
return dictobject.new_dict(key_type, value_type)
return impl
@box(types.DictType)
def box_dicttype(typ, val, c):
context = c.context
builder = c.builder
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder, value=val)
boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)
numba_name = c.context.insert_const_string(c.builder.module, 'numba')
numba_mod = c.pyapi.import_module_noblock(numba_name)
typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')
fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')
dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typeddict_mod)
c.pyapi.decref(numba_mod)
c.pyapi.decref(boxed_meminfo)
return res
@unbox(types.DictType)
def unbox_dicttype(typ, val, c):
context = c.context
builder = c.builder
miptr = c.pyapi.object_getattr_string(val, '_opaque')
native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
mi = native.value
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder)
data_pointer = context.nrt.meminfo_data(builder, mi)
    data_pointer = builder.bitcast(data_pointer, dictobject.ll_dict_type.as_pointer())
dstruct.data = builder.load(data_pointer)
dstruct.meminfo = mi
dctobj = dstruct._getvalue()
c.pyapi.decref(miptr)
return NativeValue(dctobj)
<|reserved_special_token_1|>
"""
Python wrapper that connects CPython interpreter to the numba dictobject.
"""
from collections.abc import MutableMapping  # the ABCs moved out of `collections` in Python 3.10
from numba.types import DictType, TypeRef
from numba import njit, dictobject, types, cgutils
from numba.extending import (
overload_method,
box,
unbox,
NativeValue
)
@njit
def _make_dict(keyty, valty):
return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))
@njit
def _length(d):
return len(d)
@njit
def _setitem(d, key, value):
d[key] = value
@njit
def _getitem(d, key):
return d[key]
@njit
def _delitem(d, key):
del d[key]
@njit
def _contains(d, key):
return key in d
@njit
def _get(d, key, default):
return d.get(key, default)
@njit
def _setdefault(d, key, default):
return d.setdefault(key, default)
@njit
def _iter(d):
return list(d.keys())
@njit
def _copy(d):
return d.copy()
def _from_meminfo_ptr(ptr, dicttype):
d = TypedDict(meminfo=ptr, dcttype=dicttype)
return d
class TypedDict(MutableMapping):
"""A typed-dictionary usable in Numba compiled functions.
Implements the MutableMapping interface.
"""
@classmethod
def empty(cls, key_type, value_type):
"""Create a new empty TypedDict with *key_type* and *value_type*
as the types for the keys and values of the dictionary respectively.
"""
return cls(dcttype=DictType(key_type, value_type))
def __init__(self, **kwargs):
"""
Parameters
----------
dcttype : numba.types.DictType; keyword-only
The dictionary type
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
"""
self._dict_type, self._opaque = self._parse_arg(**kwargs)
def _parse_arg(self, dcttype, meminfo=None):
if not isinstance(dcttype, DictType):
raise TypeError('*dcttype* must be a DictType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_dict(dcttype.key_type, dcttype.value_type)
return dcttype, opaque
@property
def _numba_type_(self):
return self._dict_type
def __getitem__(self, key):
return _getitem(self, key)
def __setitem__(self, key, value):
return _setitem(self, key, value)
def __delitem__(self, key):
_delitem(self, key)
def __iter__(self):
return iter(_iter(self))
def __len__(self):
return _length(self)
def __contains__(self, key):
return _contains(self, key)
def __str__(self):
buf = []
for k, v in self.items():
buf.append("{}: {}".format(k, v))
return '{{{0}}}'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._dict_type)
return "{prefix}({body})".format(prefix=prefix, body=body)
def get(self, key, default=None):
return _get(self, key, default)
def setdefault(self, key, default=None):
return _setdefault(self, key, default)
def copy(self):
return _copy(self)
# XXX: should we have a better way to classmethod
@overload_method(TypeRef, 'empty')
def typeddict_empty(cls, key_type, value_type):
if cls.instance_type is not DictType:
return
def impl(cls, key_type, value_type):
return dictobject.new_dict(key_type, value_type)
return impl
@box(types.DictType)
def box_dicttype(typ, val, c):
context = c.context
builder = c.builder
# XXX deduplicate
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder, value=val)
# Returns the plain MemInfo
boxed_meminfo = c.box(
types.MemInfoPointer(types.voidptr),
dstruct.meminfo,
)
numba_name = c.context.insert_const_string(c.builder.module, 'numba')
numba_mod = c.pyapi.import_module_noblock(numba_name)
typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')
fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')
dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typeddict_mod)
c.pyapi.decref(numba_mod)
c.pyapi.decref(boxed_meminfo)
return res
@unbox(types.DictType)
def unbox_dicttype(typ, val, c):
context = c.context
builder = c.builder
miptr = c.pyapi.object_getattr_string(val, '_opaque')
native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
mi = native.value
ctor = cgutils.create_struct_proxy(typ)
dstruct = ctor(context, builder)
data_pointer = context.nrt.meminfo_data(builder, mi)
data_pointer = builder.bitcast(
data_pointer,
dictobject.ll_dict_type.as_pointer(),
)
dstruct.data = builder.load(data_pointer)
dstruct.meminfo = mi
dctobj = dstruct._getvalue()
c.pyapi.decref(miptr)
return NativeValue(dctobj)
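A minimal usage sketch of the wrapper above; the import path is an assumption inferred from the boxing code (which looks the class up under `numba.typeddict`), and the key/value types are illustrative:

```python
# Assumed import path (inferred from box_dicttype); types are illustrative.
from numba import njit, types
from numba.typeddict import TypedDict

d = TypedDict.empty(key_type=types.int64, value_type=types.float64)
d[1] = 2.5             # goes through the @njit _setitem helper
print(d.get(1))        # 2.5
print(len(d), 1 in d)  # 1 True

@njit
def double_value(d, k):
    # d is unboxed to the native dictobject inside nopython code
    return d[k] * 2.0

print(double_value(d, 1))  # 5.0
```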
| flexible |
{
"blob_id": "ad44e9411ba6a07c54bb55b0d8af9d0c16c6b71b",
"index": 5173,
"step-1": "<mask token>\n\n\ndef _from_meminfo_ptr(ptr, dicttype):\n d = TypedDict(meminfo=ptr, dcttype=dicttype)\n return d\n\n\nclass TypedDict(MutableMapping):\n \"\"\"A typed-dictionary usable in Numba compiled functions.\n\n Implements the MutableMapping interface.\n \"\"\"\n\n @classmethod\n def empty(cls, key_type, value_type):\n \"\"\"Create a new empty TypedDict with *key_type* and *value_type*\n as the types for the keys and values of the dictionary respectively.\n \"\"\"\n return cls(dcttype=DictType(key_type, value_type))\n\n def __init__(self, **kwargs):\n \"\"\"\n Parameters\n ----------\n dcttype : numba.types.DictType; keyword-only\n The dictionary type\n meminfo : MemInfo; keyword-only\n Used internally to pass the MemInfo object when boxing.\n \"\"\"\n self._dict_type, self._opaque = self._parse_arg(**kwargs)\n\n def _parse_arg(self, dcttype, meminfo=None):\n if not isinstance(dcttype, DictType):\n raise TypeError('*dcttype* must be a DictType')\n if meminfo is not None:\n opaque = meminfo\n else:\n opaque = _make_dict(dcttype.key_type, dcttype.value_type)\n return dcttype, opaque\n\n @property\n def _numba_type_(self):\n return self._dict_type\n\n def __getitem__(self, key):\n return _getitem(self, key)\n\n def __setitem__(self, key, value):\n return _setitem(self, key, value)\n\n def __delitem__(self, key):\n _delitem(self, key)\n\n def __iter__(self):\n return iter(_iter(self))\n\n def __len__(self):\n return _length(self)\n\n def __contains__(self, key):\n return _contains(self, key)\n\n def __str__(self):\n buf = []\n for k, v in self.items():\n buf.append('{}: {}'.format(k, v))\n return '{{{0}}}'.format(', '.join(buf))\n\n def __repr__(self):\n body = str(self)\n prefix = str(self._dict_type)\n return '{prefix}({body})'.format(prefix=prefix, body=body)\n\n def get(self, key, default=None):\n return _get(self, key, default)\n\n def setdefault(self, key, default=None):\n return _setdefault(self, key, default)\n\n def copy(self):\n return _copy(self)\n\n\n<mask token>\n\n\n@box(types.DictType)\ndef box_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder, value=val)\n boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)\n numba_name = c.context.insert_const_string(c.builder.module, 'numba')\n numba_mod = c.pyapi.import_module_noblock(numba_name)\n typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')\n fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')\n dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))\n res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))\n c.pyapi.decref(fmp_fn)\n c.pyapi.decref(typeddict_mod)\n c.pyapi.decref(numba_mod)\n c.pyapi.decref(boxed_meminfo)\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@njit\ndef _make_dict(keyty, valty):\n return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))\n\n\n<mask token>\n\n\n@njit\ndef _setitem(d, key, value):\n d[key] = value\n\n\n<mask token>\n\n\n@njit\ndef _delitem(d, key):\n del d[key]\n\n\n<mask token>\n\n\n@njit\ndef _get(d, key, default):\n return d.get(key, default)\n\n\n<mask token>\n\n\ndef _from_meminfo_ptr(ptr, dicttype):\n d = TypedDict(meminfo=ptr, dcttype=dicttype)\n return d\n\n\nclass TypedDict(MutableMapping):\n \"\"\"A typed-dictionary usable in Numba compiled functions.\n\n Implements the MutableMapping interface.\n \"\"\"\n\n @classmethod\n def empty(cls, key_type, value_type):\n \"\"\"Create a new empty TypedDict with *key_type* and *value_type*\n as the types for the keys and values of the dictionary respectively.\n \"\"\"\n return cls(dcttype=DictType(key_type, value_type))\n\n def __init__(self, **kwargs):\n \"\"\"\n Parameters\n ----------\n dcttype : numba.types.DictType; keyword-only\n The dictionary type\n meminfo : MemInfo; keyword-only\n Used internally to pass the MemInfo object when boxing.\n \"\"\"\n self._dict_type, self._opaque = self._parse_arg(**kwargs)\n\n def _parse_arg(self, dcttype, meminfo=None):\n if not isinstance(dcttype, DictType):\n raise TypeError('*dcttype* must be a DictType')\n if meminfo is not None:\n opaque = meminfo\n else:\n opaque = _make_dict(dcttype.key_type, dcttype.value_type)\n return dcttype, opaque\n\n @property\n def _numba_type_(self):\n return self._dict_type\n\n def __getitem__(self, key):\n return _getitem(self, key)\n\n def __setitem__(self, key, value):\n return _setitem(self, key, value)\n\n def __delitem__(self, key):\n _delitem(self, key)\n\n def __iter__(self):\n return iter(_iter(self))\n\n def __len__(self):\n return _length(self)\n\n def __contains__(self, key):\n return _contains(self, key)\n\n def __str__(self):\n buf = []\n for k, v in self.items():\n buf.append('{}: {}'.format(k, v))\n return '{{{0}}}'.format(', '.join(buf))\n\n def __repr__(self):\n body = str(self)\n prefix = str(self._dict_type)\n return '{prefix}({body})'.format(prefix=prefix, body=body)\n\n def get(self, key, default=None):\n return _get(self, key, default)\n\n def setdefault(self, key, default=None):\n return _setdefault(self, key, default)\n\n def copy(self):\n return _copy(self)\n\n\n@overload_method(TypeRef, 'empty')\ndef typeddict_empty(cls, key_type, value_type):\n if cls.instance_type is not DictType:\n return\n\n def impl(cls, key_type, value_type):\n return dictobject.new_dict(key_type, value_type)\n return impl\n\n\n@box(types.DictType)\ndef box_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder, value=val)\n boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)\n numba_name = c.context.insert_const_string(c.builder.module, 'numba')\n numba_mod = c.pyapi.import_module_noblock(numba_name)\n typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')\n fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')\n dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))\n res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))\n c.pyapi.decref(fmp_fn)\n c.pyapi.decref(typeddict_mod)\n c.pyapi.decref(numba_mod)\n c.pyapi.decref(boxed_meminfo)\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@njit\ndef _make_dict(keyty, valty):\n return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))\n\n\n@njit\ndef _length(d):\n return len(d)\n\n\n@njit\ndef _setitem(d, key, value):\n d[key] = value\n\n\n@njit\ndef _getitem(d, key):\n return d[key]\n\n\n@njit\ndef _delitem(d, key):\n del d[key]\n\n\n<mask token>\n\n\n@njit\ndef _get(d, key, default):\n return d.get(key, default)\n\n\n<mask token>\n\n\n@njit\ndef _copy(d):\n return d.copy()\n\n\ndef _from_meminfo_ptr(ptr, dicttype):\n d = TypedDict(meminfo=ptr, dcttype=dicttype)\n return d\n\n\nclass TypedDict(MutableMapping):\n \"\"\"A typed-dictionary usable in Numba compiled functions.\n\n Implements the MutableMapping interface.\n \"\"\"\n\n @classmethod\n def empty(cls, key_type, value_type):\n \"\"\"Create a new empty TypedDict with *key_type* and *value_type*\n as the types for the keys and values of the dictionary respectively.\n \"\"\"\n return cls(dcttype=DictType(key_type, value_type))\n\n def __init__(self, **kwargs):\n \"\"\"\n Parameters\n ----------\n dcttype : numba.types.DictType; keyword-only\n The dictionary type\n meminfo : MemInfo; keyword-only\n Used internally to pass the MemInfo object when boxing.\n \"\"\"\n self._dict_type, self._opaque = self._parse_arg(**kwargs)\n\n def _parse_arg(self, dcttype, meminfo=None):\n if not isinstance(dcttype, DictType):\n raise TypeError('*dcttype* must be a DictType')\n if meminfo is not None:\n opaque = meminfo\n else:\n opaque = _make_dict(dcttype.key_type, dcttype.value_type)\n return dcttype, opaque\n\n @property\n def _numba_type_(self):\n return self._dict_type\n\n def __getitem__(self, key):\n return _getitem(self, key)\n\n def __setitem__(self, key, value):\n return _setitem(self, key, value)\n\n def __delitem__(self, key):\n _delitem(self, key)\n\n def __iter__(self):\n return iter(_iter(self))\n\n def __len__(self):\n return _length(self)\n\n def __contains__(self, key):\n return _contains(self, key)\n\n def __str__(self):\n buf = []\n for k, v in self.items():\n buf.append('{}: {}'.format(k, v))\n return '{{{0}}}'.format(', '.join(buf))\n\n def __repr__(self):\n body = str(self)\n prefix = str(self._dict_type)\n return '{prefix}({body})'.format(prefix=prefix, body=body)\n\n def get(self, key, default=None):\n return _get(self, key, default)\n\n def setdefault(self, key, default=None):\n return _setdefault(self, key, default)\n\n def copy(self):\n return _copy(self)\n\n\n@overload_method(TypeRef, 'empty')\ndef typeddict_empty(cls, key_type, value_type):\n if cls.instance_type is not DictType:\n return\n\n def impl(cls, key_type, value_type):\n return dictobject.new_dict(key_type, value_type)\n return impl\n\n\n@box(types.DictType)\ndef box_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder, value=val)\n boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)\n numba_name = c.context.insert_const_string(c.builder.module, 'numba')\n numba_mod = c.pyapi.import_module_noblock(numba_name)\n typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')\n fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')\n dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))\n res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))\n c.pyapi.decref(fmp_fn)\n c.pyapi.decref(typeddict_mod)\n c.pyapi.decref(numba_mod)\n c.pyapi.decref(boxed_meminfo)\n return 
res\n\n\n@unbox(types.DictType)\ndef unbox_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n miptr = c.pyapi.object_getattr_string(val, '_opaque')\n native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)\n mi = native.value\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder)\n data_pointer = context.nrt.meminfo_data(builder, mi)\n data_pointer = builder.bitcast(data_pointer, dictobject.ll_dict_type.\n as_pointer())\n dstruct.data = builder.load(data_pointer)\n dstruct.meminfo = mi\n dctobj = dstruct._getvalue()\n c.pyapi.decref(miptr)\n return NativeValue(dctobj)\n",
"step-4": "<mask token>\n\n\n@njit\ndef _make_dict(keyty, valty):\n return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))\n\n\n@njit\ndef _length(d):\n return len(d)\n\n\n@njit\ndef _setitem(d, key, value):\n d[key] = value\n\n\n@njit\ndef _getitem(d, key):\n return d[key]\n\n\n@njit\ndef _delitem(d, key):\n del d[key]\n\n\n<mask token>\n\n\n@njit\ndef _get(d, key, default):\n return d.get(key, default)\n\n\n@njit\ndef _setdefault(d, key, default):\n return d.setdefault(key, default)\n\n\n<mask token>\n\n\n@njit\ndef _copy(d):\n return d.copy()\n\n\ndef _from_meminfo_ptr(ptr, dicttype):\n d = TypedDict(meminfo=ptr, dcttype=dicttype)\n return d\n\n\nclass TypedDict(MutableMapping):\n \"\"\"A typed-dictionary usable in Numba compiled functions.\n\n Implements the MutableMapping interface.\n \"\"\"\n\n @classmethod\n def empty(cls, key_type, value_type):\n \"\"\"Create a new empty TypedDict with *key_type* and *value_type*\n as the types for the keys and values of the dictionary respectively.\n \"\"\"\n return cls(dcttype=DictType(key_type, value_type))\n\n def __init__(self, **kwargs):\n \"\"\"\n Parameters\n ----------\n dcttype : numba.types.DictType; keyword-only\n The dictionary type\n meminfo : MemInfo; keyword-only\n Used internally to pass the MemInfo object when boxing.\n \"\"\"\n self._dict_type, self._opaque = self._parse_arg(**kwargs)\n\n def _parse_arg(self, dcttype, meminfo=None):\n if not isinstance(dcttype, DictType):\n raise TypeError('*dcttype* must be a DictType')\n if meminfo is not None:\n opaque = meminfo\n else:\n opaque = _make_dict(dcttype.key_type, dcttype.value_type)\n return dcttype, opaque\n\n @property\n def _numba_type_(self):\n return self._dict_type\n\n def __getitem__(self, key):\n return _getitem(self, key)\n\n def __setitem__(self, key, value):\n return _setitem(self, key, value)\n\n def __delitem__(self, key):\n _delitem(self, key)\n\n def __iter__(self):\n return iter(_iter(self))\n\n def __len__(self):\n return _length(self)\n\n def __contains__(self, key):\n return _contains(self, key)\n\n def __str__(self):\n buf = []\n for k, v in self.items():\n buf.append('{}: {}'.format(k, v))\n return '{{{0}}}'.format(', '.join(buf))\n\n def __repr__(self):\n body = str(self)\n prefix = str(self._dict_type)\n return '{prefix}({body})'.format(prefix=prefix, body=body)\n\n def get(self, key, default=None):\n return _get(self, key, default)\n\n def setdefault(self, key, default=None):\n return _setdefault(self, key, default)\n\n def copy(self):\n return _copy(self)\n\n\n@overload_method(TypeRef, 'empty')\ndef typeddict_empty(cls, key_type, value_type):\n if cls.instance_type is not DictType:\n return\n\n def impl(cls, key_type, value_type):\n return dictobject.new_dict(key_type, value_type)\n return impl\n\n\n@box(types.DictType)\ndef box_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder, value=val)\n boxed_meminfo = c.box(types.MemInfoPointer(types.voidptr), dstruct.meminfo)\n numba_name = c.context.insert_const_string(c.builder.module, 'numba')\n numba_mod = c.pyapi.import_module_noblock(numba_name)\n typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')\n fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')\n dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))\n res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))\n c.pyapi.decref(fmp_fn)\n c.pyapi.decref(typeddict_mod)\n 
c.pyapi.decref(numba_mod)\n c.pyapi.decref(boxed_meminfo)\n return res\n\n\n@unbox(types.DictType)\ndef unbox_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n miptr = c.pyapi.object_getattr_string(val, '_opaque')\n native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)\n mi = native.value\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder)\n data_pointer = context.nrt.meminfo_data(builder, mi)\n data_pointer = builder.bitcast(data_pointer, dictobject.ll_dict_type.\n as_pointer())\n dstruct.data = builder.load(data_pointer)\n dstruct.meminfo = mi\n dctobj = dstruct._getvalue()\n c.pyapi.decref(miptr)\n return NativeValue(dctobj)\n",
"step-5": "\"\"\"\nPython wrapper that connects CPython interpreter to the numba dictobject.\n\"\"\"\nfrom collections import MutableMapping\n\nfrom numba.types import DictType, TypeRef\nfrom numba import njit, dictobject, types, cgutils\nfrom numba.extending import (\n overload_method,\n box,\n unbox,\n NativeValue\n)\n\n\n@njit\ndef _make_dict(keyty, valty):\n return dictobject._as_meminfo(dictobject.new_dict(keyty, valty))\n\n\n@njit\ndef _length(d):\n return len(d)\n\n\n@njit\ndef _setitem(d, key, value):\n d[key] = value\n\n\n@njit\ndef _getitem(d, key):\n return d[key]\n\n\n@njit\ndef _delitem(d, key):\n del d[key]\n\n\n@njit\ndef _contains(d, key):\n return key in d\n\n\n@njit\ndef _get(d, key, default):\n return d.get(key, default)\n\n\n@njit\ndef _setdefault(d, key, default):\n return d.setdefault(key, default)\n\n\n@njit\ndef _iter(d):\n return list(d.keys())\n\n\n@njit\ndef _copy(d):\n return d.copy()\n\n\ndef _from_meminfo_ptr(ptr, dicttype):\n d = TypedDict(meminfo=ptr, dcttype=dicttype)\n return d\n\n\nclass TypedDict(MutableMapping):\n \"\"\"A typed-dictionary usable in Numba compiled functions.\n\n Implements the MutableMapping interface.\n \"\"\"\n @classmethod\n def empty(cls, key_type, value_type):\n \"\"\"Create a new empty TypedDict with *key_type* and *value_type*\n as the types for the keys and values of the dictionary respectively.\n \"\"\"\n return cls(dcttype=DictType(key_type, value_type))\n\n def __init__(self, **kwargs):\n \"\"\"\n Parameters\n ----------\n dcttype : numba.types.DictType; keyword-only\n The dictionary type\n meminfo : MemInfo; keyword-only\n Used internally to pass the MemInfo object when boxing.\n \"\"\"\n self._dict_type, self._opaque = self._parse_arg(**kwargs)\n\n def _parse_arg(self, dcttype, meminfo=None):\n if not isinstance(dcttype, DictType):\n raise TypeError('*dcttype* must be a DictType')\n\n if meminfo is not None:\n opaque = meminfo\n else:\n opaque = _make_dict(dcttype.key_type, dcttype.value_type)\n return dcttype, opaque\n\n @property\n def _numba_type_(self):\n return self._dict_type\n\n def __getitem__(self, key):\n return _getitem(self, key)\n\n def __setitem__(self, key, value):\n return _setitem(self, key, value)\n\n def __delitem__(self, key):\n _delitem(self, key)\n\n def __iter__(self):\n return iter(_iter(self))\n\n def __len__(self):\n return _length(self)\n\n def __contains__(self, key):\n return _contains(self, key)\n\n def __str__(self):\n buf = []\n for k, v in self.items():\n buf.append(\"{}: {}\".format(k, v))\n return '{{{0}}}'.format(', '.join(buf))\n\n def __repr__(self):\n body = str(self)\n prefix = str(self._dict_type)\n return \"{prefix}({body})\".format(prefix=prefix, body=body)\n\n def get(self, key, default=None):\n return _get(self, key, default)\n\n def setdefault(self, key, default=None):\n return _setdefault(self, key, default)\n\n def copy(self):\n return _copy(self)\n\n\n# XXX: should we have a better way to classmethod\n@overload_method(TypeRef, 'empty')\ndef typeddict_empty(cls, key_type, value_type):\n if cls.instance_type is not DictType:\n return\n\n def impl(cls, key_type, value_type):\n return dictobject.new_dict(key_type, value_type)\n\n return impl\n\n\n@box(types.DictType)\ndef box_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n\n # XXX deduplicate\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder, value=val)\n # Returns the plain MemInfo\n boxed_meminfo = c.box(\n types.MemInfoPointer(types.voidptr),\n dstruct.meminfo,\n )\n\n numba_name = 
c.context.insert_const_string(c.builder.module, 'numba')\n numba_mod = c.pyapi.import_module_noblock(numba_name)\n typeddict_mod = c.pyapi.object_getattr_string(numba_mod, 'typeddict')\n fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr')\n\n dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))\n\n res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, dicttype_obj))\n c.pyapi.decref(fmp_fn)\n c.pyapi.decref(typeddict_mod)\n c.pyapi.decref(numba_mod)\n c.pyapi.decref(boxed_meminfo)\n return res\n\n\n@unbox(types.DictType)\ndef unbox_dicttype(typ, val, c):\n context = c.context\n builder = c.builder\n\n miptr = c.pyapi.object_getattr_string(val, '_opaque')\n\n native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)\n\n mi = native.value\n ctor = cgutils.create_struct_proxy(typ)\n dstruct = ctor(context, builder)\n\n data_pointer = context.nrt.meminfo_data(builder, mi)\n data_pointer = builder.bitcast(\n data_pointer,\n dictobject.ll_dict_type.as_pointer(),\n )\n\n dstruct.data = builder.load(data_pointer)\n dstruct.meminfo = mi\n\n dctobj = dstruct._getvalue()\n c.pyapi.decref(miptr)\n\n return NativeValue(dctobj)\n",
"step-ids": [
19,
24,
28,
29,
33
]
}
| [19, 24, 28, 29, 33] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
BOT_NAME = 'reddit_scraper'
SPIDER_MODULES = ['reddit_scraper.spiders']
NEWSPIDER_MODULE = 'reddit_scraper.spiders'
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Scrapy settings for reddit_scraper project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'reddit_scraper'
SPIDER_MODULES = ['reddit_scraper.spiders']
NEWSPIDER_MODULE = 'reddit_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'reddit_scraper (+http://www.yourdomain.com)'
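For context, SPIDER_MODULES points Scrapy at `reddit_scraper.spiders`; a hypothetical minimal spider living there (the subreddit URL and CSS selectors are illustrative assumptions, not from this project) would look like:

```python
# reddit_scraper/spiders/reddit.py -- hypothetical example spider
import scrapy

class RedditSpider(scrapy.Spider):
    name = 'reddit'
    start_urls = ['https://old.reddit.com/r/python/']

    def parse(self, response):
        # Yield one item per post title on the listing page.
        for title in response.css('a.title::text').getall():
            yield {'title': title}
```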
| flexible |
{
"blob_id": "a352768c2928cb7a33b9f1a31a0b3d8e56a8376a",
"index": 5588,
"step-1": "<mask token>\n",
"step-2": "BOT_NAME = 'reddit_scraper'\nSPIDER_MODULES = ['reddit_scraper.spiders']\nNEWSPIDER_MODULE = 'reddit_scraper.spiders'\n",
"step-3": "# -*- coding: utf-8 -*-\n\n# Scrapy settings for reddit_scraper project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'reddit_scraper'\n\nSPIDER_MODULES = ['reddit_scraper.spiders']\nNEWSPIDER_MODULE = 'reddit_scraper.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'reddit_scraper (+http://www.yourdomain.com)'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
| [0, 1, 2] |
import cv2
def movemouse(event, x, y, flags, param):
global img
img2 = img.copy()
# img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)
if event == cv2.EVENT_MOUSEMOVE:
font = cv2.FONT_HERSHEY_SIMPLEX
message = '{}'.format(img2[y, x])
        cv2.putText(img2, message, (int(w / 2.5), int(h / 16)), font, 0.5, (255, 255, 255), 1)
cv2.circle(img2, (x, y), 1, (0, 0, 255), -1)
cv2.imshow('image', img2)
def main():
cv2.namedWindow("image")
cv2.setMouseCallback("image", movemouse)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
img = cv2.imread('./2.jpg')
img_size = img.shape
h, w = img_size[0:2]
main()
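The commented-out cvtColor line hints at inspecting HSV rather than BGR values; a small illustrative helper (an assumption, not in the original) that converts the sampled pixel:

```python
# Illustrative helper: convert a single sampled BGR pixel to HSV.
import cv2
import numpy as np

def bgr_and_hsv(img, x, y):
    bgr = img[y, x]
    hsv = cv2.cvtColor(np.uint8([[bgr]]), cv2.COLOR_BGR2HSV)[0, 0]
    return bgr, hsv
```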
| normal |
{
"blob_id": "cae0aeea2ebd0a429cf6ecc9acab8f5f103e9669",
"index": 1989,
"step-1": "<mask token>\n\n\ndef main():\n cv2.namedWindow('image')\n cv2.setMouseCallback('image', movemouse)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef movemouse(event, x, y, flags, param):\n global img\n img2 = img.copy()\n if event == cv2.EVENT_MOUSEMOVE:\n font = cv2.FONT_HERSHEY_SIMPLEX\n message = '{}'.format(img2[y, x])\n cv2.putText(img2, message, (int(w / 2.5), int(h / 16)), font, 0.5,\n (255, 255, 255), 1)\n cv2.circle(img2, (x, y), 1, (0, 0, 255), -1)\n cv2.imshow('image', img2)\n\n\ndef main():\n cv2.namedWindow('image')\n cv2.setMouseCallback('image', movemouse)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef movemouse(event, x, y, flags, param):\n global img\n img2 = img.copy()\n if event == cv2.EVENT_MOUSEMOVE:\n font = cv2.FONT_HERSHEY_SIMPLEX\n message = '{}'.format(img2[y, x])\n cv2.putText(img2, message, (int(w / 2.5), int(h / 16)), font, 0.5,\n (255, 255, 255), 1)\n cv2.circle(img2, (x, y), 1, (0, 0, 255), -1)\n cv2.imshow('image', img2)\n\n\ndef main():\n cv2.namedWindow('image')\n cv2.setMouseCallback('image', movemouse)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n img = cv2.imread('./2.jpg')\n img_size = img.shape\n h, w = img_size[0:2]\n main()\n",
"step-4": "import cv2\n\n\ndef movemouse(event, x, y, flags, param):\n global img\n img2 = img.copy()\n if event == cv2.EVENT_MOUSEMOVE:\n font = cv2.FONT_HERSHEY_SIMPLEX\n message = '{}'.format(img2[y, x])\n cv2.putText(img2, message, (int(w / 2.5), int(h / 16)), font, 0.5,\n (255, 255, 255), 1)\n cv2.circle(img2, (x, y), 1, (0, 0, 255), -1)\n cv2.imshow('image', img2)\n\n\ndef main():\n cv2.namedWindow('image')\n cv2.setMouseCallback('image', movemouse)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n img = cv2.imread('./2.jpg')\n img_size = img.shape\n h, w = img_size[0:2]\n main()\n",
"step-5": "import cv2\n\ndef movemouse(event, x, y, flags, param):\n global img\n\n img2 = img.copy()\n # img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)\n\n if event == cv2.EVENT_MOUSEMOVE:\n font = cv2.FONT_HERSHEY_SIMPLEX\n message = '{}'.format(img2[y, x])\n cv2.putText(img2, message, (int(w / 2.5), int(h / 16)),font, 0.5, (255, 255, 255), 1)\n cv2.circle(img2, (x, y), 1, (0, 0, 255), -1)\n cv2.imshow('image', img2)\n\n\ndef main():\n cv2.namedWindow(\"image\")\n\n cv2.setMouseCallback(\"image\", movemouse)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n img = cv2.imread('./2.jpg')\n img_size = img.shape\n h, w = img_size[0:2]\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
<|reserved_special_token_0|>
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
        self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
        self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
        self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
            candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
"""多输入:输入 user_id、 news_input"""
"""输入单个用户的 user id 和 一篇 news 的信息"""
user_id, news_input = inputs[0], inputs[1]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
        self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
        self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
        self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
            candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
<|reserved_special_token_1|>
<|reserved_special_token_0|>
npratio = 4
MAX_SENT_LENGTH = 30
MAX_SENTS = 50
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
"""多输入:输入 user_id、 news_input"""
"""输入单个用户的 user id 和 一篇 news 的信息"""
user_id, news_input = inputs[0], inputs[1]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
        self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
        self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
        self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
            candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from keras import backend as K
npratio = 4
MAX_SENT_LENGTH = 30
MAX_SENTS = 50
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
"""多输入:输入 user_id、 news_input"""
"""输入单个用户的 user id 和 一篇 news 的信息"""
user_id, news_input = inputs[0], inputs[1]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
        self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
        self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
        self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]
)
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
# -*- coding: utf-8 -*-
"""
======================
@author : Zhang Xu
@time : 2021/9/8:16:29
@email : [email protected]
@content : reproducing NPA with tensorflow subclassing
======================
"""
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from keras import backend as K

npratio = 4

MAX_SENT_LENGTH = 30  # number of words in one news article
MAX_SENTS = 50  # number of news articles clicked by one user

# news encoder
# input: user id and the information of one news article
# output: news representation
class NewsEncoder(tf.keras.Model):

    def __init__(self):
        super(NewsEncoder, self).__init__(name='NewsEncoder')

        # user_id part
        self.userid_input_layer = Input()
        self.userid_embedding_layer = Embedding()
        self.userid_dense_layer = Dense()
        self.userid_flatten_layer = Flatten()

        # news part
        self.news_input_layer = Input()
        self.news_embedding_layer = Embedding()
        self.news_conv_layer = Conv1D()
        self.news_dropout_layer_1 = Dropout(0.2)
        self.news_dropout_layer_2 = Dropout(0.2)

        # personalized attention part
        self.pa_dense_layer = Dense()
        self.pa_2_1_dot_layer = Dot()
        self.pa_softmax_layer = Activation('softmax')
        self.pa_1_1_dot_layer = Dot()

    def call(self, inputs):
        '''Multiple inputs: user_id and news_input.'''
        '''Takes one user's id and the information of a single news article.'''
        user_id, news_input = inputs[0], inputs[1]

        # qw
        x1 = self.userid_input_layer(user_id)
        x1 = self.userid_embedding_layer(x1)
        x1 = self.userid_dense_layer(x1)
        qw = self.userid_flatten_layer(x1)

        # news representation
        x2 = self.news_input_layer(news_input)
        x2 = self.news_embedding_layer(x2)
        x2 = self.news_dropout_layer_1(x2)
        x2 = self.news_conv_layer(x2)
        x2 = self.news_dropout_layer_2(x2)

        # personalized attention
        qw = self.pa_dense_layer(qw)
        attention_a = self.pa_2_1_dot_layer([x2, qw])
        attention_weight = self.pa_softmax_layer(attention_a)
        news_rep = self.pa_1_1_dot_layer([x2, attention_weight])

        return news_rep


# NPA
# input: user id, all N news articles the user clicked, and K candidate news articles
# output: a click probability predicted for each of the K candidate news articles
class NPA(tf.keras.Model):

    def __init__(self):
        super(NPA, self).__init__(name='NPA')

        # user id part
        self.userid_input_layer = Input()
        self.userid_embedding_layer = Embedding()
        self.userid_dense_layer = Dense()
        self.userid_flatten_layer = Flatten()

        # clicked news part
        self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
        self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
        self.clickednews_dense_layer = Dense()
        self.clickednews_2_1_dot_layer = Dot((2, 1))
        self.clickednews_softmax_layer = Activation('softmax')
        self.clickednews_1_1_dot_layer = Dot((1, 1))

        # candidate news part
        self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
        self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]

        # click prediction
        self.cp_dot_layer = dot()
        self.cp_concatenate = concatenate()
        self.cp_activation_layer = Activation('softmax')

    def call(self, inputs):
        user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]

        # qd
        x1 = self.userid_input_layer(user_id)
        x1 = self.userid_embedding_layer(x1)
        x1 = self.userid_dense_layer(x1)
        qd = self.userid_flatten_layer(x1)

        # clicked news
        clicked_news_vec = [0]*MAX_SENTS
        for i in range(len(clicked_news)):
            xx = self.clickednews_input_layer[i](clicked_news[i])
            clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
        clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x, axis=1))(news) for news in clicked_news_vec], axis=1)

        # personalized attention between qd and clicked_news_rep
        news_temp_dense = self.clickednews_dense_layer(qd)
        attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep, news_temp_dense])
        attention_news_weight = self.clickednews_softmax_layer(attention_news)
        user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep, attention_news_weight])

        # candidate news
        candidate_news_vec = [0]*(1+npratio)
        for i in range(len(candidate_news)):
            xx = self.candidatenews_input_layer[i](candidate_news[i])
            candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])

        # click prediction
        # dot product between each candidate news representation and the user representation, then softmax
        logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for candidate_news in candidate_news_vec]
        logits = self.cp_activation_layer(self.cp_concatenate(logits))

        return logits
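# Shape walk-through for the personalized attention above (illustrative only;
# the layer sizes are left unspecified in this reproduction, so an embedding
# width of 400 is assumed here purely for the sketch):
#   clicked_news_rep : (batch, MAX_SENTS, 400)
#   news_temp_dense  : (batch, 400)  -> Dot((2, 1)) scores : (batch, MAX_SENTS)
#   user_rep         : softmax-weighted Dot((1, 1)) sum over MAX_SENTS -> (batch, 400)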
|
flexible
|
{
"blob_id": "f3789d70f784345881f705fc809c49ad4e3526bc",
"index": 1287,
"step-1": "<mask token>\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n <mask token>\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n",
"step-2": "<mask token>\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n\n def call(self, inputs):\n \"\"\"多输入:输入 user_id、 news_input\"\"\"\n \"\"\"输入单个用户的 user id 和 一篇 news 的信息\"\"\"\n user_id, news_input = inputs[0], inputs[1]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qw = self.userid_flatten_layer(x1)\n x2 = self.news_input_layer(news_input)\n x2 = self.news_embedding_layer(x2)\n x2 = self.news_dropout_layer_1(x2)\n x2 = self.news_conv_layer(x2)\n x2 = self.news_dropout_layer_2(x2)\n qw = self.pa_dense_layer(qw)\n attention_a = self.pa_2_1_dot_layer([x2, qw])\n attention_weight = self.pa_softmax_layer(attention_a)\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\n return news_rep\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return 
logits\n",
"step-3": "<mask token>\nnpratio = 4\nMAX_SENT_LENGTH = 30\nMAX_SENTS = 50\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n\n def call(self, inputs):\n \"\"\"多输入:输入 user_id、 news_input\"\"\"\n \"\"\"输入单个用户的 user id 和 一篇 news 的信息\"\"\"\n user_id, news_input = inputs[0], inputs[1]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qw = self.userid_flatten_layer(x1)\n x2 = self.news_input_layer(news_input)\n x2 = self.news_embedding_layer(x2)\n x2 = self.news_dropout_layer_1(x2)\n x2 = self.news_conv_layer(x2)\n x2 = self.news_dropout_layer_2(x2)\n qw = self.pa_dense_layer(qw)\n attention_a = self.pa_2_1_dot_layer([x2, qw])\n attention_weight = self.pa_softmax_layer(attention_a)\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\n return news_rep\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = 
self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n",
"step-4": "<mask token>\nimport tensorflow as tf\nfrom tensorflow.keras import *\nfrom tensorflow.keras.layers import *\nfrom keras import backend as K\nnpratio = 4\nMAX_SENT_LENGTH = 30\nMAX_SENTS = 50\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n\n def call(self, inputs):\n \"\"\"多输入:输入 user_id、 news_input\"\"\"\n \"\"\"输入单个用户的 user id 和 一篇 news 的信息\"\"\"\n user_id, news_input = inputs[0], inputs[1]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qw = self.userid_flatten_layer(x1)\n x2 = self.news_input_layer(news_input)\n x2 = self.news_embedding_layer(x2)\n x2 = self.news_dropout_layer_1(x2)\n x2 = self.news_conv_layer(x2)\n x2 = self.news_dropout_layer_2(x2)\n qw = self.pa_dense_layer(qw)\n attention_a = self.pa_2_1_dot_layer([x2, qw])\n attention_weight = self.pa_softmax_layer(attention_a)\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\n return news_rep\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = 
[self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n======================\r\n@author : Zhang Xu\r\n@time : 2021/9/8:16:29\r\n@email : [email protected]\r\n@content : tensorflow subclassing 复现 NPA\r\n======================\r\n\"\"\"\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import *\r\nfrom tensorflow.keras.layers import *\r\nfrom keras import backend as K\r\n\r\nnpratio = 4\r\n\r\nMAX_SENT_LENGTH = 30 # 一篇news的单词数量\r\nMAX_SENTS = 50 # 一个用户的点击的news的数量\r\n\r\n# news encoder\r\n# 输入:user id, 1篇news的信息\r\n# 输出:news representation\r\nclass NewsEncoder(tf.keras.Model):\r\n\r\n def __init__(self):\r\n super(NewsEncoder, self).__init__(name='NewsEncoder')\r\n\r\n # user_id 部分\r\n self.userid_input_layer = Input()\r\n self.userid_embedding_layer = Embedding()\r\n self.userid_dense_layer = Dense()\r\n self.userid_flatten_layer = Flatten()\r\n\r\n # news 部分\r\n self.news_input_layer = Input()\r\n self.news_embedding_layer = Embedding()\r\n self.news_conv_layer = Conv1D()\r\n self.news_dropout_layer_1 = Dropout(0.2)\r\n self.news_dropout_layer_2 = Dropout(0.2)\r\n\r\n # personalized attention 部分\r\n self.pa_dense_layer = Dense()\r\n self.pa_2_1_dot_layer = Dot()\r\n self.pa_softmax_layer = Activation('softmax')\r\n self.pa_1_1_dot_layer = Dot()\r\n\r\n def call(self, inputs):\r\n '''多输入:输入 user_id、 news_input'''\r\n '''输入单个用户的 user id 和 一篇 news 的信息'''\r\n user_id, news_input = inputs[0], inputs[1]\r\n\r\n # qw\r\n x1 = self.userid_input_layer(user_id)\r\n x1 = self.userid_embedding_layer(x1)\r\n x1 = self.userid_dense_layer(x1)\r\n qw = self.userid_flatten_layer(x1)\r\n\r\n # news representation\r\n x2 = self.news_input_layer(news_input)\r\n x2 = self.news_embedding_layer(x2)\r\n x2 = self.news_dropout_layer_1(x2)\r\n x2 = self.news_conv_layer(x2)\r\n x2 = self.news_dropout_layer_2(x2)\r\n\r\n # personalized attention\r\n qw = self.pa_dense_layer(qw)\r\n attention_a = self.pa_2_1_dot_layer([x2, qw])\r\n attention_weight = self.pa_softmax_layer(attention_a)\r\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\r\n\r\n return news_rep\r\n\r\n\r\n# NPA\r\n# 输入:user id 和 该用户所有的 clicked news(N篇) 和 candidate news(K篇)\r\n# 输出:对K篇 candidate news 做出预测,分别给出点击的概率\r\nclass NPA(tf.keras.Model):\r\n\r\n def __init__(self):\r\n super(NPA, self).__init__(name='NPA')\r\n\r\n # user id 部分\r\n self.userid_input_layer = Input()\r\n self.userid_embedding_layer = Embedding()\r\n self.userid_dense_layer = Dense()\r\n self.userid_flatten_layer = Flatten()\r\n\r\n # clicked news 部分\r\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]\r\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\r\n self.clickednews_dense_layer = Dense()\r\n self.clickednews_2_1_dot_layer = Dot((2, 1))\r\n self.clickednews_softmax_layer = Activation('softmax')\r\n self.clickednews_1_1_dot_layer = Dot((1, 1))\r\n\r\n # candidate news 部分\r\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]\r\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]\r\n\r\n # click prediction\r\n self.cp_dot_layer = dot()\r\n self.cp_concatenate = concatenate()\r\n self.cp_activation_layer = Activation('softmax')\r\n\r\n\r\n def call(self, inputs):\r\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\r\n\r\n # qd\r\n x1 = self.userid_input_layer(user_id)\r\n x1 = self.userid_embedding_layer(x1)\r\n x1 = self.userid_dense_layer(x1)\r\n qd = self.userid_flatten_layer(x1)\r\n\r\n # clicked news\r\n 
clicked_news_vec = [0]*MAX_SENTS\r\n for i in range(len(clicked_news)):\r\n xx = self.clickednews_input_layer[i](clicked_news[i])\r\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\r\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x, axis=1))(news) for news in clicked_news_vec], axis=1)\r\n\r\n # qd 与 click_news_rep 进行 personalized attention\r\n news_temp_dense = self.clickednews_dense_layer(qd)\r\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep, news_temp_dense])\r\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\r\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep, attention_news_weight])\r\n\r\n # candidate news\r\n candidate_news_vec = [0]*(1+npratio)\r\n for i in range(len(candidate_news)):\r\n xx = self.candidatenews_input_layer[i](candidate_news[i])\r\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])\r\n\r\n # click prediction\r\n # candidate news representation 与 user representation 进行 dot 和 softmax\r\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for candidate_news in candidate_news_vec]\r\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\r\n\r\n return logits",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import os
from os.path import join
import json
import pandas as pd
import time
import numpy as np
import torch
import argparse  # needed: str2bool below raises argparse.ArgumentTypeError
def str2bool(v):
# convert string to boolean type for argparser input
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str_or_none(v):
    # convert the string 'none' (any case) to None for argparser input
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
# helper functions for LDA arguments
def dic2name(dic):
return '_'.join(["{}-{}".format(k, dic[k]) for k in sorted(dic)])
def name2dic(s):
return {x.split('-')[0]:x.split('-')[1] for x in s.split('_')}
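# Round-trip example (hypothetical values): dic2name({'kw': '1000', 'topic': '20'})
# gives 'kw-1000_topic-20', and name2dic turns it back into the mapping:
#   assert name2dic(dic2name({'kw': '1000'})) == {'kw': '1000'}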
def get_valid_types(TYPENAME):
with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile:
valid_types = json.load(typefile)[TYPENAME]
return valid_types
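# Usage sketch (BASEPATH must be set in the environment; the key name here is
# hypothetical): get_valid_types('type78') returns the list stored under that
# key in $BASEPATH/configs/types.json.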
def df_index_gen(f, table=False):
# merge locator and dataset_id to genearte index table_id
f.loc[:,'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x['dataset_id']]), axis = 1)
if not table:
f.loc[:,'field_id'] = f.apply(lambda x: x['field_id'].split(":")[-1], axis = 1)
f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
return f
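# Example (hypothetical row): locator='corpusA', dataset_id='t42' yields
# table_id='corpusA+t42'; with table=False a field_id like 'a:b:col3' is
# shortened to its last segment, 'col3'.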
# load dataframe from pickle or create pickle file
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, "{}.pkl".format(name))
if os.path.exists(pkl_file):
print("{} pickle file found, loading...".format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
#process and save pkl
print("{} pickle file not found, creating...".format(pkl_file))
df = pd.read_csv(join(load_path, "{}.csv".format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print("{} Load complete. Time {}".format(name, time.time()-start))
return df
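# Typical call (illustrative paths): load_tmp_df('data/raw', 'data/tmp', 'train_fields')
# loads data/tmp/train_fields.pkl when it exists, otherwise builds it from
# data/raw/train_fields.csv and caches the pickle for the next run.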
def logSumExpTensor(vec):
# vec -> 16, tag_size
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))
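# Stability identity used above: logsumexp(v) = m + log(sum(exp(v - m))) with
# m = max(v), so exp() never receives a large positive argument and cannot overflow.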
def logNormalizeTensor(a):
denom = logSumExpTensor(a)
if len(a.size())==2:
denom = denom.view(-1, 1).expand(-1, a.size()[1])
elif len(a.size())==3:
denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])
return (a-denom)
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose()- denom).transpose()
def logDot(a, b):
# numeric stable way of calculating log (e^a, e^b)
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
# else:
# np.log(C + 1e-300, out=C)
C += max_a + max_b
return C
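# Sanity check (illustrative): for small inputs logDot should match the naive
# computation, e.g.
#   a = np.log(np.random.rand(2, 3)); b = np.log(np.random.rand(3, 2))
#   np.testing.assert_allclose(logDot(a, b), np.log(np.exp(a) @ np.exp(b)))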
|
normal
|
{
"blob_id": "a9302dbf724f9548411fbf2959f36b4cc5742ff8",
"index": 4999,
"step-1": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\n<mask token>\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\n<mask token>\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\n<mask token>\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-3": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\ndef logNormalizeTensor(a):\n denom = logSumExpTensor(a)\n if len(a.size()) == 2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size()) == 3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.\n size()[2])\n return a - denom\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-4": "import os\nfrom os.path import join\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\ndef get_valid_types(TYPENAME):\n with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r'\n ) as typefile:\n valid_types = json.load(typefile)[TYPENAME]\n return valid_types\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\ndef logNormalizeTensor(a):\n denom = logSumExpTensor(a)\n if len(a.size()) == 2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size()) == 3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.\n size()[2])\n return a - denom\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-5": "import os\nfrom os.path import join\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch \n\ndef str2bool(v):\n # convert string to boolean type for argparser input\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef str_or_none(v):\n # convert string to boolean type for argparser input\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n# helper functions for LDA arguments\ndef dic2name(dic):\n return '_'.join([\"{}-{}\".format(k, dic[k]) for k in sorted(dic)])\n\ndef name2dic(s):\n return {x.split('-')[0]:x.split('-')[1] for x in s.split('_')}\n\n\ndef get_valid_types(TYPENAME):\n\n with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile: \n valid_types = json.load(typefile)[TYPENAME]\n return valid_types\n\n\ndef df_index_gen(f, table=False):\n # merge locator and dataset_id to genearte index table_id\n f.loc[:,'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x['dataset_id']]), axis = 1)\n if not table:\n f.loc[:,'field_id'] = f.apply(lambda x: x['field_id'].split(\":\")[-1], axis = 1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\n# load dataframe from pickle or create pickle file\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, \"{}.pkl\".format(name))\n if os.path.exists(pkl_file):\n print(\"{} pickle file found, loading...\".format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n #process and save pkl\n print(\"{} pickle file not found, creating...\".format(pkl_file))\n df = pd.read_csv(join(load_path, \"{}.csv\".format(name)))\n\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print(\"{} Load complete. Time {}\".format(name, time.time()-start))\n return df\n\ndef logSumExpTensor(vec):\n # vec -> 16, tag_size\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))\n\ndef logNormalizeTensor(a):\n\n denom = logSumExpTensor(a)\n if len(a.size())==2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size())==3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])\n return (a-denom)\n\ndef logNormalize(a):\n\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose()- denom).transpose()\n\ndef logDot(a, b):\n\n # numeric stable way of calculating log (e^a, e^b)\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C\n",
"step-ids": [
5,
8,
9,
12,
13
]
}
|
[
5,
8,
9,
12,
13
] |
<mask token>
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
def main():
for n in primes():
if n < 1000:
print(n)
else:
break
<mask token>
def changeImage():
im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')
print(im.format, im.size, im.mode)
im.thumbnail((1000, 500))
im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')
<mask token>
def run_proc(name):
print('Run child process %s %s' % (name, os.getpid()))
def long_time_task(name):
print('Run task %s %s...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
    print('Task %s run %0.2f' % (name, end - start))
<mask token>

<mask token>
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
def main():
for n in primes():
if n < 1000:
print(n)
else:
break
def is_palindrome(n):
return int(str(n)[::-1]) == n
def count():
def f(j):
return j * j
fs = []
for i in range(1, 4):
fs.append(f(i))
return fs
<mask token>
def changeImage():
im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')
print(im.format, im.size, im.mode)
im.thumbnail((1000, 500))
im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')
<mask token>
def run_proc(name):
print('Run child process %s %s' % (name, os.getpid()))
def long_time_task(name):
print('Run task %s %s...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
    print('Task %s run %0.2f' % (name, end - start))
def chinese_to_pinyin(x):
"""参数为字符串,返回为该字符串对应的汉语拼音"""
y = ''
for i in x:
i = str(i.encode('unicode_escape'))[-5:-1].upper()
return y
<mask token>
def _odd_iter():
n = 1
while True:
n += 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
def main():
for n in primes():
if n < 1000:
print(n)
else:
break
def is_palindrome(n):
return int(str(n)[::-1]) == n
def count():
def f(j):
return j * j
fs = []
for i in range(1, 4):
fs.append(f(i))
return fs
<mask token>
def changeImage():
im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')
print(im.format, im.size, im.mode)
im.thumbnail((1000, 500))
im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')
<mask token>
def run_proc(name):
print('Run child process %s %s' % (name, os.getpid()))
def long_time_task(name):
print('Run task %s %s...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
    print('Task %s run %0.2f' % (name, end - start))
def chinese_to_pinyin(x):
"""参数为字符串,返回为该字符串对应的汉语拼音"""
y = ''
for i in x:
i = str(i.encode('unicode_escape'))[-5:-1].upper()
return y
<mask token>
def _odd_iter():
n = 1
while True:
n += 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
def main():
for n in primes():
if n < 1000:
print(n)
else:
break
def is_palindrome(n):
return int(str(n)[::-1]) == n
def count():
def f(j):
return j * j
fs = []
for i in range(1, 4):
fs.append(f(i))
return fs
<mask token>
def changeImage():
im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')
print(im.format, im.size, im.mode)
im.thumbnail((1000, 500))
im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')
<mask token>
def run_proc(name):
print('Run child process %s %s' % (name, os.getpid()))
def long_time_task(name):
print('Run task %s %s...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
    print('Task %s run %0.2f' % (name, end - start))
def chinese_to_pinyin(x):
"""参数为字符串,返回为该字符串对应的汉语拼音"""
y = ''
for i in x:
i = str(i.encode('unicode_escape'))[-5:-1].upper()
return y
if __name__ == '__main__':
print(chinese_to_pinyin(u'陈'))
#!/usr/bin/python
# coding: utf-8
# # import re
# # import urllib
# #
# #
# # def getHtml(url):
# # page = urllib.urlopen(url)
# # html = page.read()
# # return html
# #
# #
# # def getMp4(html):
# # r = r"href='(http.*\.mp4)'"
# # re_mp4 = re.compile(r)
# # mp4List = re.findall(re_mp4, html)
# # filename = 1
# # for mp4url in mp4List:
# # urllib.urlretrieve(mp4url, "%s.mp4" % filename)
# # print 'file "%s.mp4" done' % filename
# # filename += 1
# # url = "http://v.youku.com/v_show/id_XMjYxMjEyNDU0MA==.html"
# # html = getHtml(url)
# # getMp4(html)
#
#
#
#
# # import re
# #
# #
# # pattern = re.compile(r'hello world')
# # match = pattern.match('hello world!')
# #
# # if match:
# # print match.group()
#
#
# #
# # # bubble sort
# # array = [4, 5, 0, 2, 3, 7, 1, 6]
# #
# # for i in range(len(array) - 1, 1, -1):
# # for j in range(0, i):
# # if array[j] > array[j + 1]:
# # array[j], array[j + 1] = array[j + 1], array[j]
# # print array
#
# # theString = 'saaaay yes no yaaaass'
# # print theString.strip('say')  # there is a space after 'say'
#
#
#
# # -*- coding:utf-8 -*-
# import urllib
# import urllib2
# import re
# import thread
# import time
#
#
# # Qiushibaike (a Chinese joke site) spider class
# class QSBK:
#     # initializer, define some variables
#     def __init__(self):
#         self.pageIndex = 1
#         self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
#         # initialize headers
#         self.headers = {'User-Agent': self.user_agent}
#         # holds the jokes; each element is one page's worth of jokes
#         self.stories = []
#         # flag indicating whether the program should keep running
#         self.enable = False
#
#     # fetch the page source for a given page index
#     def getPage(self, pageIndex):
#         try:
#             url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
#             # build the request
#             request = urllib2.Request(url, headers=self.headers)
#             # fetch the page source with urlopen
#             response = urllib2.urlopen(request)
#             # decode the page as UTF-8
#             pageCode = response.read().decode('utf-8')
#             return pageCode
#
#         except urllib2.URLError, e:
#             if hasattr(e, "reason"):
#                 print u"failed to connect to Qiushibaike, reason:", e.reason
#                 return None
#
#     # given one page's source, return that page's jokes without images
#     def getPageItems(self, pageIndex):
#         pageCode = self.getPage(pageIndex)
#         if not pageCode:
#             print "page failed to load...."
#             return None
#         # pattern = re.compile('<div class=author clearfix>.*?<img src=.*? alt=(.*?)>.*?<div.*?' +
#         #                      '<span>(.*?)</span>.*?stats-vote><i class=number>(.*?)</i>.*?' +
#         #                      '<i class=number>(.*?)</i>', re.S)
#         pattern = re.compile('h2>(.*?)</h2.*?content">(.*?)</.*?number">(.*?)</', re.S)
#         items = re.findall(pattern, pageCode)
#         # stores each page's jokes
#         pageStories = []
#         # iterate over the regex matches
#         # for item in items:
#         #     # does it contain an image?
#         #     haveImg = re.search("img", item[3])
#         #     # if there is no image, add it to the list
#         #     if not haveImg:
#         #         replaceBR = re.compile('<br/>')
#         #         text = re.sub(replaceBR, "\n", item[1])
#         #         # item[0] is the joke's author, item[1] the content, item[2] the publish time, item[4] the upvote count
#         #         pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[4].strip()])
#         # return pageStories
#         for item in items:
#             pageStories.append([item[0].strip(), item[1].strip(), item[2].strip()])
#         return pageStories
#
#     # load and extract page content, appending it to the list
#     def loadPage(self):
#         # if fewer than 2 unread pages remain, load a new page
#         if self.enable == True:
#             if len(self.stories) < 2:
#                 # fetch a new page
#                 pageStories = self.getPageItems(self.pageIndex)
#                 # store that page's jokes in the global list
#                 if pageStories:
#                     self.stories.append(pageStories)
#                     # after fetching, increment the page index so the next read gets the next page
#                     self.pageIndex += 1
#
#     # call this method; each press of Enter prints one joke
#     def getOneStory(self, pageStories, page):
#         # iterate over one page's jokes
#         for story in pageStories:
#             # wait for user input
#             input = raw_input()
#             # on every Enter keypress, check whether a new page should be loaded
#             self.loadPage()
#             # typing Q ends the program
#             if input == "Q":
#                 self.enable = False
#                 return
#             print u"page %d\tauthor: %s\t upvotes: %s\n%s" % (page, story[0], story[2], story[1])
#
#     # start method
#     def start(self):
#         print u"reading Qiushibaike; press Enter for a new joke, Q to quit"
#         # set the flag to True so the program can run
#         self.enable = True
#         # load one page of content first
#         self.loadPage()
#         # local variable tracking which page we are on
#         nowPage = 0
#         while self.enable:
#             if len(self.stories) > 0:
#                 # take one page of jokes from the global list
#                 pageStories = self.stories[0]
#                 # increment the current page count
#                 nowPage += 1
#                 # remove the first element of the global list since it has been taken
#                 del self.stories[0]
#                 # print that page's jokes
#                 self.getOneStory(pageStories, nowPage)
#
#
# spider = QSBK()
# spider.start()
#
# print [x * x for x in range(1, 11) if x % 2 == 0]
def _odd_iter():
n = 1
while True:
n += 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
    it = _odd_iter()  # the initial sequence
    while True:
        n = next(it)  # take the first number of the sequence
        yield n
        it = filter(_not_divisible(n), it)  # build the new filtered sequence
def main():
    # print the primes below 1000:
for n in primes():
if n < 1000:
print(n)
else:
break
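# The generator chain above implements an Eratosthenes-style sieve lazily:
# each prime n wraps the stream in one more filter(_not_divisible(n), ...),
# so composites are discarded the first time a smaller prime divides them.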
def is_palindrome(n):
return int(str(n)[::-1]) == n
def count():
def f(j):
# def g():
return j*j
# return g
fs = []
for i in range(1, 4):
        fs.append(f(i))  # f(i) is executed immediately, so the current value of i is passed into f()
return fs
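# Illustration: count() returns [1, 4, 9] here because f(i) is called eagerly;
# the classic closure pitfall (appending g and calling the closures after the
# loop) would capture the final i and yield [9, 9, 9] instead.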
from PIL import Image
def changeImage():
im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')
print(im.format, im.size, im.mode)
im.thumbnail((1000, 500))
im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')
from multiprocessing import Process, Pool
import os, time, random
def run_proc(name):
print("Run child process %s %s" % (name, os.getpid()))
def long_time_task(name):
print('Run task %s %s...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
    print('Task %s run %0.2f' % (name, end - start))
def chinese_to_pinyin(x):
"""参数为字符串,返回为该字符串对应的汉语拼音"""
y = ''
# dic = {}
# with open("unicode_pinyin.txt") as f:
# for i in f.readlines():
# dic[i.split()[0]] = i.split()[1]
for i in x:
i = str(i.encode('unicode_escape'))[-5:-1].upper()
# try:
# y += dic[i] + ' '
# except:
        #     y += 'XXXX '  # we substitute XXXX for invalid characters
return y
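# Note: as written the loop computes each character's uppercase unicode escape
# (e.g. u'陈' -> '9648') but never appends anything to y, so the function
# returns ''. Restoring the commented-out unicode_pinyin.txt lookup (a file
# mapping escape codes to pinyin, assumed to ship with the script) completes it.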
if __name__ == '__main__':
# main()
# print(_not_divisible(3))
# output = filter(is_palindrome, range(1, 1000))
# print(list(output))
# print(range(100))[::-1]
# f1, f2, f3 = count()
# print(f1)
# print(f2)
# print(f3)
# changeImage()
# print("Parent process %s ", os.getpid())
# p = Process(target=run_proc, args=("test",))
# print('Child process will start.')
# p.start()
# p.join()
# print('Child process end.')
# print("Parent process %s ", os.getpid())
# p = Pool(5)
# for i in range(5):
# p.apply_async(long_time_task, args=(i,))
# print('Waiting for all subprocesses done...')
# p.close()
# p.join()
# print('All subprocesses done.')
print(chinese_to_pinyin(u"陈"))
|
flexible
|
{
"blob_id": "ad94118b43e130aec5df3976fd0460164de17511",
"index": 8361,
"step-1": "<mask token>\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n\n def f(j):\n return j * j\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n return y\n\n\n<mask token>\n",
"step-3": "def _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n\n def f(j):\n return j * j\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n return y\n\n\n<mask token>\n",
"step-4": "def _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(_not_divisible(n), it)\n\n\ndef main():\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n\n def f(j):\n return j * j\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\n<mask token>\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\n<mask token>\n\n\ndef run_proc(name):\n print('Run child process %s %s' % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' % (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, start - end))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n return y\n\n\nif __name__ == '__main__':\n print(chinese_to_pinyin(u'陈'))\n",
"step-5": "#!/usr/bin/python\n# coding: utf-8\n\n\n# # import re\n# # import urllib\n# #\n# #\n# # def getHtml(url):\n# # page = urllib.urlopen(url)\n# # html = page.read()\n# # return html\n# #\n# #\n# # def getMp4(html):\n# # r = r\"href='(http.*\\.mp4)'\"\n# # re_mp4 = re.compile(r)\n# # mp4List = re.findall(re_mp4, html)\n# # filename = 1\n# # for mp4url in mp4List:\n# # urllib.urlretrieve(mp4url, \"%s.mp4\" % filename)\n# # print 'file \"%s.mp4\" done' % filename\n# # filename += 1\n# # url = \"http://v.youku.com/v_show/id_XMjYxMjEyNDU0MA==.html\"\n# # html = getHtml(url)\n# # getMp4(html)\n#\n#\n#\n#\n# # import re\n# #\n# #\n# # pattern = re.compile(r'hello world')\n# # match = pattern.match('hello world!')\n# #\n# # if match:\n# # print match.group()\n#\n#\n# #\n# # # 冒泡排序\n# # array = [4, 5, 0, 2, 3, 7, 1, 6]\n# #\n# # for i in range(len(array) - 1, 1, -1):\n# # for j in range(0, i):\n# # if array[j] > array[j + 1]:\n# # array[j], array[j + 1] = array[j + 1], array[j]\n# # print array\n#\n# # theString = 'saaaay yes no yaaaass'\n# # print theString.strip('say') #say后面有空格\n#\n#\n#\n# # -*- coding:utf-8 -*-\n# import urllib\n# import urllib2\n# import re\n# import thread\n# import time\n#\n#\n# # 糗事百科爬虫类\n# class QSBK:\n# # 初始化方法,定义一些变量\n# def __init__(self):\n# self.pageIndex = 1\n# self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n# # 初始化headers\n# self.headers = {'User-Agent': self.user_agent}\n# # 存放段子的变量,每一个元素是每一页的段子们\n# self.stories = []\n# # 存放程序是否继续运行的变量\n# self.enable = False\n#\n# # 传入某一页的索引获得页面代码\n# def getPage(self, pageIndex):\n# try:\n# url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)\n# # 构建请求的request\n# request = urllib2.Request(url, headers=self.headers)\n# # 利用urlopen获取页面代码\n# response = urllib2.urlopen(request)\n# # 将页面转化为UTF-8编码\n# pageCode = response.read().decode('utf-8')\n# return pageCode\n#\n# except urllib2.URLError, e:\n# if hasattr(e, \"reason\"):\n# print u\"连接糗事百科失败,错误原因\", e.reason\n# return None\n#\n# # 传入某一页代码,返回本页不带图片的段子列表\n# def getPageItems(self, pageIndex):\n# pageCode = self.getPage(pageIndex)\n# if not pageCode:\n# print \"页面加载失败....\"\n# return None\n# # pattern = re.compile('<div class=author clearfix>.*?<img src=.*? alt=(.*?)>.*?<div.*?' +\n# # '<span>(.*?)</span>.*?stats-vote><i class=number>(.*?)</i>.*?' 
+\n# # '<i class=number>(.*?)</i>', re.S)\n# pattern = re.compile('h2>(.*?)</h2.*?content\">(.*?)</.*?number\">(.*?)</', re.S)\n# items = re.findall(pattern, pageCode)\n# # 用来存储每页的段子们\n# pageStories = []\n# # 遍历正则表达式匹配的信息\n# # for item in items:\n# # # 是否含有图片\n# # haveImg = re.search(\"img\", item[3])\n# # # 如果不含有图片,把它加入list中\n# # if not haveImg:\n# # replaceBR = re.compile('<br/>')\n# # text = re.sub(replaceBR, \"\\n\", item[1])\n# # # item[0]是一个段子的发布者,item[1]是内容,item[2]是发布时间,item[4]是点赞数\n# # pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[4].strip()])\n# # return pageStories\n# for item in items:\n# pageStories.append([item[0].strip(), item[1].strip(), item[2].strip()])\n# return pageStories\n#\n# # 加载并提取页面的内容,加入到列表中\n# def loadPage(self):\n# # 如果当前未看的页数少于2页,则加载新一页\n# if self.enable == True:\n# if len(self.stories) < 2:\n# # 获取新一页\n# pageStories = self.getPageItems(self.pageIndex)\n# # 将该页的段子存放到全局list中\n# if pageStories:\n# self.stories.append(pageStories)\n# # 获取完之后页码索引加一,表示下次读取下一页\n# self.pageIndex += 1\n#\n# # 调用该方法,每次敲回车打印输出一个段子\n# def getOneStory(self, pageStories, page):\n# # 遍历一页的段子\n# for story in pageStories:\n# # 等待用户输入\n# input = raw_input()\n# # 每当输入回车一次,判断一下是否要加载新页面\n# self.loadPage()\n# # 如果输入Q则程序结束\n# if input == \"Q\":\n# self.enable = False\n# return\n# print u\"第%d页\\t发布人:%s\\t 赞:%s\\n%s\" % (page, story[0], story[2], story[1])\n#\n# # 开始方法\n# def start(self):\n# print u\"正在读取糗事百科,按回车查看新段子,Q退出\"\n# # 使变量为True,程序可以正常运行\n# self.enable = True\n# # 先加载一页内容\n# self.loadPage()\n# # 局部变量,控制当前读到了第几页\n# nowPage = 0\n# while self.enable:\n# if len(self.stories) > 0:\n# # 从全局list中获取一页的段子\n# pageStories = self.stories[0]\n# # 当前读到的页数加一\n# nowPage += 1\n# # 将全局list中第一个元素删除,因为已经取出\n# del self.stories[0]\n# # 输出该页的段子\n# self.getOneStory(pageStories, nowPage)\n#\n#\n# spider = QSBK()\n# spider.start()\n#\n# print [x * x for x in range(1, 11) if x % 2 == 0]\n\n\ndef _odd_iter():\n n = 1\n while True:\n n += 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter() # 初始序列\n while True:\n n = next(it) # 返回序列的第一个数\n yield n\n it = filter(_not_divisible(n), it) # 构造新序列\n\n\ndef main():\n # 打印1000以内的素数:\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\ndef count():\n def f(j):\n # def g():\n return j*j\n # return g\n fs = []\n for i in range(1, 4):\n fs.append(f(i)) # f(i)立刻被执行,因此i的当前值被传入f()\n return fs\n\n\nfrom PIL import Image\n\n\ndef changeImage():\n im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')\n print(im.format, im.size, im.mode)\n im.thumbnail((1000, 500))\n im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')\n\n\nfrom multiprocessing import Process, Pool\nimport os, time, random\n\n\ndef run_proc(name):\n print(\"Run child process %s %s\" % (name, os.getpid()))\n\n\ndef long_time_task(name):\n print('Run task %s %s...' 
% (name, os.getpid()))\n start = time.time()\n time.sleep(random.random() * 3)\n end = time.time()\n print('Task %s run %0.2f' % (name, (start - end)))\n\n\ndef chinese_to_pinyin(x):\n \"\"\"参数为字符串,返回为该字符串对应的汉语拼音\"\"\"\n y = ''\n # dic = {}\n # with open(\"unicode_pinyin.txt\") as f:\n # for i in f.readlines():\n # dic[i.split()[0]] = i.split()[1]\n for i in x:\n i = str(i.encode('unicode_escape'))[-5:-1].upper()\n # try:\n # y += dic[i] + ' '\n # except:\n # y += 'XXXX ' # 非法字符我们用XXXX代替\n return y\n\n\nif __name__ == '__main__':\n # main()\n # print(_not_divisible(3))\n # output = filter(is_palindrome, range(1, 1000))\n # print(list(output))\n # print(range(100))[::-1]\n # f1, f2, f3 = count()\n # print(f1)\n # print(f2)\n # print(f3)\n # changeImage()\n # print(\"Parent process %s \", os.getpid())\n # p = Process(target=run_proc, args=(\"test\",))\n # print('Child process will start.')\n # p.start()\n # p.join()\n # print('Child process end.')\n # print(\"Parent process %s \", os.getpid())\n # p = Pool(5)\n # for i in range(5):\n # p.apply_async(long_time_task, args=(i,))\n # print('Waiting for all subprocesses done...')\n # p.close()\n # p.join()\n # print('All subprocesses done.')\n print(chinese_to_pinyin(u\"陈\"))",
"step-ids": [
6,
9,
10,
11,
13
]
}
|
[
6,
9,
10,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('jobs', '0001_initial')]
operations = [migrations.AddField(model_name='job', name='link', field=
models.URLField(null=True)), migrations.AddField(model_name='job',
name='title', field=models.CharField(default=datetime.date(2020, 10,
25), max_length=200), preserve_default=False)]
<|reserved_special_token_1|>
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('jobs', '0001_initial')]
operations = [migrations.AddField(model_name='job', name='link', field=
models.URLField(null=True)), migrations.AddField(model_name='job',
name='title', field=models.CharField(default=datetime.date(2020, 10,
25), max_length=200), preserve_default=False)]
<|reserved_special_token_1|>
# Generated by Django 3.1.2 on 2020-10-25 01:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='job',
name='link',
field=models.URLField(null=True),
),
migrations.AddField(
model_name='job',
name='title',
field=models.CharField(default=datetime.date(2020, 10, 25), max_length=200),
preserve_default=False,
),
]
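
# For reference, the models.py state this migration implies; the field
# options are taken from the operations above, while the module layout is
# an assumption (this sketch belongs in jobs/models.py, not the migration):
#
#     from django.db import models
#
#     class Job(models.Model):
#         link = models.URLField(null=True)
#         title = models.CharField(max_length=200)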
|
flexible
|
{
"blob_id": "562888201719456ed2f3c32e81ffd7d2c39dabc3",
"index": 7303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('jobs', '0001_initial')]\n operations = [migrations.AddField(model_name='job', name='link', field=\n models.URLField(null=True)), migrations.AddField(model_name='job',\n name='title', field=models.CharField(default=datetime.date(2020, 10,\n 25), max_length=200), preserve_default=False)]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('jobs', '0001_initial')]\n operations = [migrations.AddField(model_name='job', name='link', field=\n models.URLField(null=True)), migrations.AddField(model_name='job',\n name='title', field=models.CharField(default=datetime.date(2020, 10,\n 25), max_length=200), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.1.2 on 2020-10-25 01:19\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jobs', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='job',\n name='link',\n field=models.URLField(null=True),\n ),\n migrations.AddField(\n model_name='job',\n name='title',\n field=models.CharField(default=datetime.date(2020, 10, 25), max_length=200),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(
'django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(
'django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column(u'smslink_phoneuser', 'last_contacted')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(
'django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column(u'smslink_phoneuser', 'last_contacted')
models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':
'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [
], {'primary_key': 'True'}), 'requirement': (
'django.db.models.fields.CharField', [], {'unique': 'True',
'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {
'unique_together': "(('outward', 'inward'),)", 'object_name':
'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'}), 'inward': (
'django.db.models.fields.CharField', [], {'max_length': '5'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [],
{'null': 'True', 'blank': 'True'}), 'outward': (
'django.db.models.fields.CharField', [], {'max_length': '5',
'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {
'object_name': 'PhoneUser'}, u'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_contacted': ('django.db.models.fields.DateTimeField', [], {
'null': 'True', 'blank': 'True'}), 'number': (
'django.db.models.fields.CharField', [], {'max_length': '20',
'db_index': 'True'}), 'post_code': (
'django.db.models.fields.related.ForeignKey', [], {'to':
u"orm['foodproviders.PostCode']", 'null': 'True', 'blank': 'True'}),
'requirements_satisfied': (
'django.db.models.fields.related.ManyToManyField', [], {'to':
u"orm['foodproviders.EntryRequirement']", 'symmetrical': 'False'})}}
complete_apps = ['smslink']
<|reserved_special_token_1|>
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(
'django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column(u'smslink_phoneuser', 'last_contacted')
models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':
'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [
], {'primary_key': 'True'}), 'requirement': (
'django.db.models.fields.CharField', [], {'unique': 'True',
'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {
'unique_together': "(('outward', 'inward'),)", 'object_name':
'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'}), 'inward': (
'django.db.models.fields.CharField', [], {'max_length': '5'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [],
{'null': 'True', 'blank': 'True'}), 'outward': (
'django.db.models.fields.CharField', [], {'max_length': '5',
'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {
'object_name': 'PhoneUser'}, u'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_contacted': ('django.db.models.fields.DateTimeField', [], {
'null': 'True', 'blank': 'True'}), 'number': (
'django.db.models.fields.CharField', [], {'max_length': '20',
'db_index': 'True'}), 'post_code': (
'django.db.models.fields.related.ForeignKey', [], {'to':
u"orm['foodproviders.PostCode']", 'null': 'True', 'blank': 'True'}),
'requirements_satisfied': (
'django.db.models.fields.related.ManyToManyField', [], {'to':
u"orm['foodproviders.EntryRequirement']", 'symmetrical': 'False'})}}
complete_apps = ['smslink']
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PhoneUser.last_contacted'
db.add_column(u'smslink_phoneuser', 'last_contacted',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PhoneUser.last_contacted'
db.delete_column(u'smslink_phoneuser', 'last_contacted')
models = {
u'foodproviders.entryrequirement': {
'Meta': {'object_name': 'EntryRequirement'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requirement': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})
},
u'foodproviders.postcode': {
'Meta': {'unique_together': "(('outward', 'inward'),)", 'object_name': 'PostCode'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inward': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'outward': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'})
},
u'smslink.phoneuser': {
'Meta': {'object_name': 'PhoneUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_contacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'post_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foodproviders.PostCode']", 'null': 'True', 'blank': 'True'}),
'requirements_satisfied': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['foodproviders.EntryRequirement']", 'symmetrical': 'False'})
}
}
complete_apps = ['smslink']
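
# For reference, the PhoneUser model implied by the frozen ORM definitions
# in the `models` dict above, rewritten as a plain Django model; the
# on_delete argument is an assumption required by modern Django:
#
#     from django.db import models
#
#     class PhoneUser(models.Model):
#         number = models.CharField(max_length=20, db_index=True)
#         post_code = models.ForeignKey('foodproviders.PostCode', null=True,
#                                       blank=True, on_delete=models.SET_NULL)
#         requirements_satisfied = models.ManyToManyField('foodproviders.EntryRequirement')
#         last_contacted = models.DateTimeField(null=True, blank=True)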
|
flexible
|
{
"blob_id": "2c1de638ac25a9f27b1af94fa075b7c1b9df6884",
"index": 993,
"step-1": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':\n 'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [\n ], {'primary_key': 'True'}), 'requirement': (\n 'django.db.models.fields.CharField', [], {'unique': 'True',\n 'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {\n 'unique_together': \"(('outward', 'inward'),)\", 'object_name':\n 'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'inward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [],\n {'null': 'True', 'blank': 'True'}), 'outward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5',\n 'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {\n 'object_name': 'PhoneUser'}, u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {\n 'null': 'True', 'blank': 'True'}), 'number': (\n 'django.db.models.fields.CharField', [], {'max_length': '20',\n 'db_index': 'True'}), 'post_code': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})}}\n complete_apps = ['smslink']\n",
"step-4": "import datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':\n 'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [\n ], {'primary_key': 'True'}), 'requirement': (\n 'django.db.models.fields.CharField', [], {'unique': 'True',\n 'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {\n 'unique_together': \"(('outward', 'inward'),)\", 'object_name':\n 'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'inward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [],\n {'null': 'True', 'blank': 'True'}), 'outward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5',\n 'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {\n 'object_name': 'PhoneUser'}, u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {\n 'null': 'True', 'blank': 'True'}), 'number': (\n 'django.db.models.fields.CharField', [], {'max_length': '20',\n 'db_index': 'True'}), 'post_code': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})}}\n complete_apps = ['smslink']\n",
"step-5": "# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'PhoneUser.last_contacted'\n db.add_column(u'smslink_phoneuser', 'last_contacted',\n self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n\n def backwards(self, orm):\n # Deleting field 'PhoneUser.last_contacted'\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n\n\n models = {\n u'foodproviders.entryrequirement': {\n 'Meta': {'object_name': 'EntryRequirement'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'requirement': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})\n },\n u'foodproviders.postcode': {\n 'Meta': {'unique_together': \"(('outward', 'inward'),)\", 'object_name': 'PostCode'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inward': ('django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),\n 'outward': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'})\n },\n u'smslink.phoneuser': {\n 'Meta': {'object_name': 'PhoneUser'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),\n 'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),\n 'post_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})\n }\n }\n\n complete_apps = ['smslink']",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Counter_cl(v_class_master):
def __init__(self):
super().__init__()
self.counter = v_variable(v_slv(32))
self.counter_max = v_variable(v_slv(32))
self.state = v_variable(v_enum(counter_state.idle))
def _onPull(self):
if self.state == counter_state.running:
self.counter << self.counter + 1
<|reserved_special_token_0|>
def isDone(self):
return self.state == counter_state.done
<|reserved_special_token_0|>
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32, 300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Counter_cl(v_class_master):
def __init__(self):
super().__init__()
self.counter = v_variable(v_slv(32))
self.counter_max = v_variable(v_slv(32))
self.state = v_variable(v_enum(counter_state.idle))
def _onPull(self):
if self.state == counter_state.running:
self.counter << self.counter + 1
<|reserved_special_token_0|>
def isDone(self):
return self.state == counter_state.done
def reset(self):
if self.state == counter_state.done:
self.state << counter_state.idle
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32, 300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Counter_cl(v_class_master):
def __init__(self):
super().__init__()
self.counter = v_variable(v_slv(32))
self.counter_max = v_variable(v_slv(32))
self.state = v_variable(v_enum(counter_state.idle))
def _onPull(self):
if self.state == counter_state.running:
self.counter << self.counter + 1
def count_to_max(self, maxValue):
if self.state == counter_state.idle:
self.counter << 0
self.counter_max << maxValue
self.state << counter_state.running
def isDone(self):
return self.state == counter_state.done
def reset(self):
if self.state == counter_state.done:
self.state << counter_state.idle
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32, 300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class counter_state(Enum):
idle = auto()
running = auto()
done = auto()
class Counter_cl(v_class_master):
def __init__(self):
super().__init__()
self.counter = v_variable(v_slv(32))
self.counter_max = v_variable(v_slv(32))
self.state = v_variable(v_enum(counter_state.idle))
def _onPull(self):
if self.state == counter_state.running:
self.counter << self.counter + 1
def count_to_max(self, maxValue):
if self.state == counter_state.idle:
self.counter << 0
self.counter_max << maxValue
self.state << counter_state.running
def isDone(self):
return self.state == counter_state.done
def reset(self):
if self.state == counter_state.done:
self.state << counter_state.idle
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32, 300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
<|reserved_special_token_0|>
convert_to_hdl(my_first_instance, 'myFirst')
<|reserved_special_token_1|>
from HDPython import *
import HDPython.examples as ahe
from enum import Enum, auto
class counter_state(Enum):
idle = auto()
running = auto()
done = auto()
class Counter_cl(v_class_master):
def __init__(self):
super().__init__()
self.counter = v_variable(v_slv(32))
self.counter_max = v_variable(v_slv(32))
self.state = v_variable(v_enum(counter_state.idle))
def _onPull(self):
if self.state == counter_state.running:
self.counter << self.counter + 1
def count_to_max(self, maxValue):
if self.state == counter_state.idle:
self.counter << 0
self.counter_max << maxValue
self.state << counter_state.running
def isDone(self):
return self.state == counter_state.done
def reset(self):
if self.state == counter_state.done:
self.state << counter_state.idle
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32,300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
my_first_instance = v_create(my_first_test_bench())
convert_to_hdl(my_first_instance, "myFirst")
|
flexible
|
{
"blob_id": "046db03b146ce0182ba7889908f536a09de051d5",
"index": 5069,
"step-1": "<mask token>\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n <mask token>\n\n def isDone(self):\n return self.state == counter_state.done\n <mask token>\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n <mask token>\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n\n def count_to_max(self, maxValue):\n if self.state == counter_state.idle:\n self.counter << 0\n self.counter_max << maxValue\n self.state << counter_state.running\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass counter_state(Enum):\n idle = auto()\n running = auto()\n done = auto()\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n\n def count_to_max(self, maxValue):\n if self.state == counter_state.idle:\n self.counter << 0\n self.counter_max << maxValue\n self.state << counter_state.running\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\nconvert_to_hdl(my_first_instance, 'myFirst')\n",
"step-5": "from HDPython import *\nimport HDPython.examples as ahe\nfrom enum import Enum, auto\n\nclass counter_state(Enum):\n idle = auto()\n running = auto()\n done = auto()\n\nclass Counter_cl(v_class_master):\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n\n def count_to_max(self, maxValue):\n if self.state == counter_state.idle:\n self.counter << 0 \n self.counter_max << maxValue\n self.state << counter_state.running\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\nclass my_first_test_bench(v_entity):\n def __init__(self):\n super().__init__()\n self.architecture()\n\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32,300))\n\n\n clkgen = v_create(ahe.clk_generator())\n\n cnt = Counter_cl()\n\n\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n\n \n\n end_architecture()\n\n\nmy_first_instance = v_create(my_first_test_bench())\n\nconvert_to_hdl(my_first_instance, \"myFirst\")",
"step-ids": [
7,
8,
9,
12,
15
]
}
|
[
7,
8,
9,
12,
15
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
plugins_list = []
class PluginType(type):
def __init__(cls, name, bases, attrs):
super(PluginType, cls).__init__(name, bases, attrs)
        # register the plugin in the global list
        if cls not in plugins_list:
plugins_list.append(cls)
class PluginBase(object):
'''
    Base class for all plugins
'''
    __metaclass__ = PluginType  # Python 2 hook; Python 3 would use `class PluginBase(metaclass=PluginType)`
pass
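
# A minimal usage sketch (class name invented): under Python 2, where the
# __metaclass__ hook applies, every subclass of PluginBase is registered in
# plugins_list the moment it is defined.


class HelloPlugin(PluginBase):
    def run(self):
        return 'hello'


# PluginType ran at class-creation time, so the list is already populated:
print(plugins_list)  # [<class 'PluginBase'>, <class 'HelloPlugin'>]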
|
normal
|
{
"blob_id": "b670655e3a8e88b97eed35e187b01d6524a16af3",
"index": 7709,
"step-1": "<mask token>\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-2": "<mask token>\n\n\nclass PluginType(type):\n <mask token>\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-3": "<mask token>\n\n\nclass PluginType(type):\n\n def __init__(cls, name, bases, attrs):\n super(PluginType, cls).__init__(name, bases, attrs)\n if not cls in plugins_list:\n plugins_list.append(cls)\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-4": "plugins_list = []\n\n\nclass PluginType(type):\n\n def __init__(cls, name, bases, attrs):\n super(PluginType, cls).__init__(name, bases, attrs)\n if not cls in plugins_list:\n plugins_list.append(cls)\n\n\nclass PluginBase(object):\n \"\"\"\n Clase base para todos los plugins\n \"\"\"\n __metaclass__ = PluginType\n pass\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nplugins_list = []\n\nclass PluginType(type):\n def __init__(cls, name, bases, attrs):\n super(PluginType, cls).__init__(name, bases, attrs)\n\n # registrar el plugin en la lista\n if not cls in plugins_list:\n plugins_list.append(cls)\n\n\nclass PluginBase(object):\n '''\n Clase base para todos los plugins\n '''\n\n __metaclass__ = PluginType\n\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Track ship statistics for the game
"""
class GameStats:
def __init__(self, setting):
self.setting = setting
self.ships_left = self.setting.ship_limit
self.game_active = True
|
normal
|
{
"blob_id": "3ab26612111e3df59f41f5b5e0bf23398e015a8a",
"index": 1595,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GameStats:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GameStats:\n\n def __init__(self, setting):\n self.setting = setting\n self.ships_left = self.setting.ship_limit\n self.game_active = True\n",
"step-4": "\"\"\"\n统计飞船信息\n\"\"\"\n\n\nclass GameStats:\n def __init__(self, setting):\n self.setting = setting\n self.ships_left = self.setting.ship_limit\n self.game_active = True\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
Query all documents (the default)
> db.test1000.find()
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
{ "_id" : ObjectId("5c3559af648171cce9135dd7"), "name" : "zhangdapeng1" }
{ "_id" : ObjectId("5c3559b2648171cce9135dd8"), "name" : "zhangdapeng2" }
{ "_id" : ObjectId("5c3559b4648171cce9135dd9"), "name" : "zhangdapeng3" }
Query with a match condition
> db.test1000.find({'name':'zhangdapeng'})
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
>
"""
"""
Less than: $lt
Less than or equal: $lte
Greater than: $gt
Greater than or equal: $gte
Not equal: $ne
Find documents with age less than or equal to 18
> db.test1000.find({age:{$lte:18}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
>
Find documents with age greater than or equal to 18
> db.test1000.find({age:{$gte:18}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
Membership: $in; $nin matches values not in the given set
> db.test1000.find({age:{$in:[17,18,19]}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
Logical queries
    AND: simply separate conditions with a comma
    OR: use $or
> db.test1000.find({$or:[{'age':18},{'age':19}]})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
>
Regular expressions
    Just wrap the pattern in two slashes: /pattern/
> db.test1000.find({'name':/zhangdapeng*/})
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
{ "_id" : ObjectId("5c3559af648171cce9135dd7"), "name" : "zhangdapeng1" }
{ "_id" : ObjectId("5c3559b2648171cce9135dd8"), "name" : "zhangdapeng2" }
{ "_id" : ObjectId("5c3559b4648171cce9135dd9"), "name" : "zhangdapeng3" }
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
>
Output control - limiting results
    find().limit(n)
    find().skip(n)
    Using both together implements pagination
find().skip(5).limit(20)
Custom queries
db.stu.find({
$where:function(){
return this.age>30;
}
})
"""
|
flexible
|
{
"blob_id": "d8e0198244c3df77fa0258cc97a55042e36d056f",
"index": 7756,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n默认查询所有\n > db.test1000.find()\n { \"_id\" : ObjectId(\"5c3559ab648171cce9135dd6\"), \"name\" : \"zhangdapeng\" }\n { \"_id\" : ObjectId(\"5c3559af648171cce9135dd7\"), \"name\" : \"zhangdapeng1\" }\n { \"_id\" : ObjectId(\"5c3559b2648171cce9135dd8\"), \"name\" : \"zhangdapeng2\" }\n { \"_id\" : ObjectId(\"5c3559b4648171cce9135dd9\"), \"name\" : \"zhangdapeng3\" }\n\n查询匹配参数\n > db.test1000.find({'name':'zhangdapeng'})\n { \"_id\" : ObjectId(\"5c3559ab648171cce9135dd6\"), \"name\" : \"zhangdapeng\" }\n >\n\n\"\"\"\n\n\"\"\"\n小于$lt\n小于等于$lte\n大于$gt\n大于等于$gte\n不等于$ne\n\n查询年龄小于等于18岁的\n > db.test1000.find({age:{$lte:18}})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a69648171cce9135ddc\"), \"name\" : \"zhangdapeng3\", \"age\" : 17 }\n > \n查询年龄大于等于18岁的\n > db.test1000.find({age:{$gte:18}})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n\n范围 $in $nin不在某个范围类\n > db.test1000.find({age:{$in:[17,18,19]}})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n { \"_id\" : ObjectId(\"5c355a69648171cce9135ddc\"), \"name\" : \"zhangdapeng3\", \"age\" : 17 }\n\n\n逻辑查询\n 并且关系直接用,逗号\n 或关系$or\n > db.test1000.find({$or:[{'age':18},{'age':19}]})\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n > \n正则表达式\n 直接用两个/正则表达式就行/ \n > db.test1000.find({'name':/zhangdapeng*/})\n { \"_id\" : ObjectId(\"5c3559ab648171cce9135dd6\"), \"name\" : \"zhangdapeng\" }\n { \"_id\" : ObjectId(\"5c3559af648171cce9135dd7\"), \"name\" : \"zhangdapeng1\" }\n { \"_id\" : ObjectId(\"5c3559b2648171cce9135dd8\"), \"name\" : \"zhangdapeng2\" }\n { \"_id\" : ObjectId(\"5c3559b4648171cce9135dd9\"), \"name\" : \"zhangdapeng3\" }\n { \"_id\" : ObjectId(\"5c355a61648171cce9135dda\"), \"name\" : \"zhangdapeng3\", \"age\" : 18 }\n { \"_id\" : ObjectId(\"5c355a65648171cce9135ddb\"), \"name\" : \"zhangdapeng3\", \"age\" : 19 }\n { \"_id\" : ObjectId(\"5c355a69648171cce9135ddc\"), \"name\" : \"zhangdapeng3\", \"age\" : 17 }\n > \n限制内容-输出控制\n find().limit(数字)\n find().skip(数字)\n 同时使用可以实现翻页\n find().skip(5).limit(20)\n \n自定义查询\n db.stu.find({\n $where:function(){\n return this.age>30;\n }\n })\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True, 'audio': True, 'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success', 'data': content.to_json(iframe=params[
'iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@main.route('/')
def index():
return render_template('templates/index.html')
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True, 'audio': True, 'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success', 'data': content.to_json(iframe=params[
'iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
<|reserved_special_token_1|>
from flask import render_template, request, current_app
from . import main
from .. import db, cache
from ..models import Content
from ..utils import make_cache_key
import requests
@main.route('/')
def index():
return render_template('templates/index.html')
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True, 'audio': True, 'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success', 'data': content.to_json(iframe=params[
'iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
<|reserved_special_token_1|>
from flask import render_template, request, current_app
from . import main
from .. import db, cache
from ..models import Content
from ..utils import make_cache_key
import requests
@main.route('/')
def index():
return render_template("templates/index.html")
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True,
'audio': True,
'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
    # Assumes an 'https://' prefix (8 characters) and inserts 'www.' when missing
    if url[8:11] != 'www':
        url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success',
'data': content.to_json(iframe=params['iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
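
# A hypothetical client call against the endpoint above (the host and port
# are assumptions about where the app is served):
#
#     import requests
#     r = requests.get('http://localhost:5000/link',
#                      params={'url': 'https://example.com', 'iframe': 1})
#     print(r.status_code, r.json().get('status'))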
|
flexible
|
{
"blob_id": "c4096cfae7182875a79ba7837187cd94b4379922",
"index": 1100,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n url = request.args.get('url')\n params = {'video': True, 'audio': True, 'screenshot': False}\n if request.args.get('iframe'):\n params['iframe'] = True\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n content = Content.query.filter_by(url=url).first()\n if content:\n return {'status': 'success', 'data': content.to_json(iframe=params[\n 'iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n return r.json(), 200\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('templates/index.html')\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n url = request.args.get('url')\n params = {'video': True, 'audio': True, 'screenshot': False}\n if request.args.get('iframe'):\n params['iframe'] = True\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n content = Content.query.filter_by(url=url).first()\n if content:\n return {'status': 'success', 'data': content.to_json(iframe=params[\n 'iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n return r.json(), 200\n",
"step-4": "from flask import render_template, request, current_app\nfrom . import main\nfrom .. import db, cache\nfrom ..models import Content\nfrom ..utils import make_cache_key\nimport requests\n\n\[email protected]('/')\ndef index():\n return render_template('templates/index.html')\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n url = request.args.get('url')\n params = {'video': True, 'audio': True, 'screenshot': False}\n if request.args.get('iframe'):\n params['iframe'] = True\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n content = Content.query.filter_by(url=url).first()\n if content:\n return {'status': 'success', 'data': content.to_json(iframe=params[\n 'iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n return r.json(), 200\n",
"step-5": "from flask import render_template, request, current_app\nfrom . import main\nfrom .. import db, cache\nfrom ..models import Content\nfrom ..utils import make_cache_key\nimport requests\n\n\[email protected]('/')\ndef index():\n return render_template(\"templates/index.html\")\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n\n url = request.args.get('url')\n\n params = {'video': True,\n 'audio': True,\n 'screenshot': False}\n\n if request.args.get('iframe'):\n params['iframe'] = True\n\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n\n content = Content.query.filter_by(url=url).first()\n\n if content:\n return {'status': 'success',\n 'data': content.to_json(iframe=params['iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n\n return r.json(), 200\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Container(containers.DeclarativeContainer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
cache_repository = providers.Singleton(MemcachedRepository, host=config
.host, port=config.port)
fibonacci_service = providers.Factory(FibonacciService,
cache_repository=cache_repository)
<|reserved_special_token_1|>
from dependency_injector import containers, providers
from src.repositories import MemcachedRepository
from src.services import FibonacciService
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
cache_repository = providers.Singleton(MemcachedRepository, host=config
.host, port=config.port)
fibonacci_service = providers.Factory(FibonacciService,
cache_repository=cache_repository)
<|reserved_special_token_1|>
from dependency_injector import containers, providers
from src.repositories import MemcachedRepository
from src.services import FibonacciService
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
cache_repository = providers.Singleton(MemcachedRepository,
host=config.host,
port=config.port)
fibonacci_service = providers.Factory(
FibonacciService,
cache_repository=cache_repository,
)
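
# A minimal wiring sketch, assuming a memcached instance on the default
# port; MemcachedRepository and FibonacciService are the project's own
# classes imported above:

container = Container()
container.config.from_dict({'host': 'localhost', 'port': 11211})

service = container.fibonacci_service()  # Factory: a new service per call
cache = container.cache_repository()     # Singleton: the same repository every call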
|
flexible
|
{
"blob_id": "e8ba1ae98b247eaf90d83339e5fdc27287a70c73",
"index": 2561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Container(containers.DeclarativeContainer):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Container(containers.DeclarativeContainer):\n config = providers.Configuration()\n cache_repository = providers.Singleton(MemcachedRepository, host=config\n .host, port=config.port)\n fibonacci_service = providers.Factory(FibonacciService,\n cache_repository=cache_repository)\n",
"step-4": "from dependency_injector import containers, providers\nfrom src.repositories import MemcachedRepository\nfrom src.services import FibonacciService\n\n\nclass Container(containers.DeclarativeContainer):\n config = providers.Configuration()\n cache_repository = providers.Singleton(MemcachedRepository, host=config\n .host, port=config.port)\n fibonacci_service = providers.Factory(FibonacciService,\n cache_repository=cache_repository)\n",
"step-5": "from dependency_injector import containers, providers\n\nfrom src.repositories import MemcachedRepository\nfrom src.services import FibonacciService\n\n\nclass Container(containers.DeclarativeContainer):\n config = providers.Configuration()\n\n cache_repository = providers.Singleton(MemcachedRepository,\n host=config.host,\n port=config.port)\n\n fibonacci_service = providers.Factory(\n FibonacciService,\n cache_repository=cache_repository,\n )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tkinter as tk
from telnetConn import telnetConnection
fields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'
def fetch(entries):
    """Collect the form values in field order and send the message over telnet."""
    input_list = []
    for entry in entries:
        field = entry[0]
        text = entry[1].get()
        input_list.append(text)
        # print('%s: "%s"' % (field, text))
    # Argument order matches `fields`: host, username, password, recipient, text
    telnetConnection(input_list[0], input_list[1], input_list[2],
                     input_list[3], input_list[4])
def makeform(root, fields):
entries = []
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=15, text=field, anchor='w')
ent = tk.Entry(row)
row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
entries.append((field, ent))
return entries
if __name__ == '__main__':
root = tk.Tk()
ents = makeform(root, fields)
root.bind('<Return>', (lambda event, e=ents: fetch(e)))
btnSend = tk.Button(root, text='Send',
command=(lambda e=ents: fetch(e)))
btnSend.pack(side=tk.LEFT, padx=5, pady=5)
btnQuit = tk.Button(root, text='Quit', command=root.quit)
btnQuit.pack(side=tk.LEFT, padx=5, pady=5)
root.mainloop()
|
normal
|
{
"blob_id": "3328c2ae0816c146398ecde92a056d1e77683696",
"index": 7357,
"step-1": "<mask token>\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('<Return>', lambda event, e=ents: fetch(e))\n btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n",
"step-3": "<mask token>\nfields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('<Return>', lambda event, e=ents: fetch(e))\n btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n",
"step-4": "import tkinter as tk\nfrom telnetConn import telnetConnection\nfields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'\n\n\ndef fetch(entries):\n input_list = []\n for entry in entries:\n field = entry[0]\n text = entry[1].get()\n input_list.append(text)\n telnetConnection(input_list[0], input_list[1], input_list[2],\n input_list[3], input_list[4])\n\n\ndef makeform(root, fields):\n entries = []\n for field in fields:\n row = tk.Frame(root)\n lab = tk.Label(row, width=15, text=field, anchor='w')\n ent = tk.Entry(row)\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\n entries.append((field, ent))\n return entries\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n ents = makeform(root, fields)\n root.bind('<Return>', lambda event, e=ents: fetch(e))\n btnSend = tk.Button(root, text='Send', command=lambda e=ents: fetch(e))\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\n root.mainloop()\n",
"step-5": "import tkinter as tk\r\nfrom telnetConn import telnetConnection\r\n\r\n\r\nfields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'\r\n\r\ndef fetch(entries):\r\n input_list = []\r\n for entry in entries:\r\n field = entry[0]\r\n text = entry[1].get()\r\n input_list.append(text)\r\n # print('%s: \"%s\"' % (field, text)) \r\n telnetConnection(input_list[0],input_list[1],input_list[2],input_list[3],input_list[4])\r\n \r\n\r\n\r\ndef makeform(root, fields):\r\n entries = []\r\n for field in fields:\r\n row = tk.Frame(root)\r\n lab = tk.Label(row, width=15, text=field, anchor='w')\r\n ent = tk.Entry(row)\r\n row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)\r\n lab.pack(side=tk.LEFT)\r\n ent.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)\r\n entries.append((field, ent))\r\n return entries\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n root = tk.Tk()\r\n ents = makeform(root, fields)\r\n root.bind('<Return>', (lambda event, e=ents: fetch(e))) \r\n\r\n btnSend = tk.Button(root, text='Send',\r\n command=(lambda e=ents: fetch(e))) \r\n btnSend.pack(side=tk.LEFT, padx=5, pady=5)\r\n\r\n btnQuit = tk.Button(root, text='Quit', command=root.quit)\r\n btnQuit.pack(side=tk.LEFT, padx=5, pady=5)\r\n\r\n root.mainloop()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Largest palindrome made from the product of two 3-digit numbers (Project Euler 4)
pal = []
for i in range(100, 1000):
for j in range(100, 1000):
s = str(i * j)
if s[::-1] == s:
pal.append(int(s))
print(max(pal))
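
# The same answer with an early-exit variant that prunes products which
# cannot beat the best palindrome found so far (a sketch, not from the
# original):

best = 0
for i in range(999, 99, -1):
    if i * 999 <= best:          # every remaining row of products is smaller
        break
    for j in range(999, i - 1, -1):
        p = i * j
        if p <= best:            # products only shrink as j decreases
            break
        if str(p) == str(p)[::-1]:
            best = p
print(best)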
|
normal
|
{
"blob_id": "179a9cf0713001e361f39aa30192618b392c78c7",
"index": 6972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(100, 1000):\n for j in range(100, 1000):\n s = str(i * j)\n if s[::-1] == s:\n pal.append(int(s))\nprint(max(pal))\n",
"step-3": "pal = []\nfor i in range(100, 1000):\n for j in range(100, 1000):\n s = str(i * j)\n if s[::-1] == s:\n pal.append(int(s))\nprint(max(pal))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process_names():
"""
Opening, reading name file and building name array.
"""
with open(input_names_file, 'r') as data:
plaintext = data.read()
name_array = plaintext.split('\n')
final_name_list = []
for name in name_array:
if len(name.split(',')) == 2:
temp_name_list = re.split(reg_ex, name)
last_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(last_name + ',' + first_name)
elif len(name.split(' ')) == 2:
final_name_list.append(name.replace(' ', ','))
elif len(name.split(' ')) == 3:
temp_name_list = re.split(' ', name)
last_name = temp_name_list.pop()
middle_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(first_name + ',' + middle_name + ' ' +
last_name)
else:
final_name_list.append(name)
with open(output_names_file, 'w') as txt_file:
txt_file.write('first_name,last_name' + '\n')
for name in final_name_list:
txt_file.write(name + '\n')
names_df = pd.read_csv(output_names_file, names=name_header, sep=',',
engine='python')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
instruments_file = os.path.abspath('instruments.csv')
input_names_file = os.path.abspath('names.txt')
output_names_file = os.path.abspath('names.csv')
inst_name_file = os.path.abspath('name_instrument.csv')
reg_ex = '; |, |\\*|\n'
name_header = ['first_name', 'last_name']
def process_names():
"""
Opening, reading name file and building name array.
"""
with open(input_names_file, 'r') as data:
plaintext = data.read()
name_array = plaintext.split('\n')
final_name_list = []
for name in name_array:
if len(name.split(',')) == 2:
temp_name_list = re.split(reg_ex, name)
last_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(last_name + ',' + first_name)
elif len(name.split(' ')) == 2:
final_name_list.append(name.replace(' ', ','))
elif len(name.split(' ')) == 3:
temp_name_list = re.split(' ', name)
last_name = temp_name_list.pop()
middle_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(first_name + ',' + middle_name + ' ' +
last_name)
else:
final_name_list.append(name)
with open(output_names_file, 'w') as txt_file:
txt_file.write('first_name,last_name' + '\n')
for name in final_name_list:
txt_file.write(name + '\n')
names_df = pd.read_csv(output_names_file, names=name_header, sep=',',
engine='python')
<|reserved_special_token_1|>
import re
import os
import pandas as pd
instruments_file = os.path.abspath('instruments.csv')
input_names_file = os.path.abspath('names.txt')
output_names_file = os.path.abspath('names.csv')
inst_name_file = os.path.abspath('name_instrument.csv')
reg_ex = '; |, |\\*|\n'
name_header = ['first_name', 'last_name']
def process_names():
"""
Opening, reading name file and building name array.
"""
with open(input_names_file, 'r') as data:
plaintext = data.read()
name_array = plaintext.split('\n')
final_name_list = []
for name in name_array:
if len(name.split(',')) == 2:
temp_name_list = re.split(reg_ex, name)
last_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(last_name + ',' + first_name)
elif len(name.split(' ')) == 2:
final_name_list.append(name.replace(' ', ','))
elif len(name.split(' ')) == 3:
temp_name_list = re.split(' ', name)
last_name = temp_name_list.pop()
middle_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(first_name + ',' + middle_name + ' ' +
last_name)
else:
final_name_list.append(name)
with open(output_names_file, 'w') as txt_file:
txt_file.write('first_name,last_name' + '\n')
for name in final_name_list:
txt_file.write(name + '\n')
names_df = pd.read_csv(output_names_file, names=name_header, sep=',',
engine='python')
<|reserved_special_token_1|>
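# Normalize names from names.txt ("Last, First", "First Last", "First Middle Last") into first_name,last_name rows.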
import re
import os
import pandas as pd
instruments_file = os.path.abspath("instruments.csv")
input_names_file = os.path.abspath("names.txt")
output_names_file = os.path.abspath("names.csv")
inst_name_file = os.path.abspath("name_instrument.csv")
reg_ex = '; |, |\\*|\n'
name_header = ["first_name", "last_name"]
def process_names():
"""
Opening, reading name file and building name array.
"""
with open(input_names_file, 'r') as data:
plaintext = data.read()
name_array = plaintext.split('\n')
# Final name list
final_name_list = []
# Parsing different name formats and standardizing to create csv
for name in name_array:
if len(name.split(',')) == 2:
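            # assuming "Last, First" input: re.split yields [last, first] and the pops read
            # right-to-left, so the appended row ends up "first,last" despite the variable names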
temp_name_list = re.split(reg_ex, name)
last_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(last_name + ',' + first_name)
elif len(name.split(' ')) == 2:
final_name_list.append(name.replace(' ', ','))
elif len(name.split(' ')) == 3:
temp_name_list = re.split(' ', name)
last_name = temp_name_list.pop()
middle_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)
else:
final_name_list.append(name)
# Writing final name list to a file
with open(output_names_file, "w") as txt_file:
txt_file.write("first_name,last_name" + "\n")
for name in final_name_list:
txt_file.write(name + "\n") # works with any number of elements in a line
names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')
|
flexible
|
{
"blob_id": "8c539dbbb762717393b9a71ddca8eb3872890854",
"index": 288,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n",
"step-3": "<mask token>\ninstruments_file = os.path.abspath('instruments.csv')\ninput_names_file = os.path.abspath('names.txt')\noutput_names_file = os.path.abspath('names.csv')\ninst_name_file = os.path.abspath('name_instrument.csv')\nreg_ex = '; |, |\\\\*|\\n'\nname_header = ['first_name', 'last_name']\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n",
"step-4": "import re\nimport os\nimport pandas as pd\ninstruments_file = os.path.abspath('instruments.csv')\ninput_names_file = os.path.abspath('names.txt')\noutput_names_file = os.path.abspath('names.csv')\ninst_name_file = os.path.abspath('name_instrument.csv')\nreg_ex = '; |, |\\\\*|\\n'\nname_header = ['first_name', 'last_name']\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n",
"step-5": "import re\nimport os\nimport pandas as pd\n\ninstruments_file = os.path.abspath(\"instruments.csv\")\ninput_names_file = os.path.abspath(\"names.txt\")\noutput_names_file = os.path.abspath(\"names.csv\")\ninst_name_file = os.path.abspath(\"name_instrument.csv\")\nreg_ex = '; |, |\\\\*|\\n'\nname_header = [\"first_name\", \"last_name\"]\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
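# Git-style dispatcher: the first positional argument selects a method on CLI to run as a subcommand.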
import argparse
import sys
import os
import cmudl.hw2p2 as hw2p2
class CLI(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='CMU Deep Learning Utilities',
)
parser.add_argument('command', help='Subcommand to run')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def hw2p2(self):
parser = argparse.ArgumentParser()
parser.add_argument('-s', type=str, default=None)
args = parser.parse_args(sys.argv[2:])
hw2p2.submit(args.s)
CLI()
|
normal
|
{
"blob_id": "0f74e0f0600c373c3ddd470f18dbb86cf213fb58",
"index": 9257,
"step-1": "<mask token>\n\n\nclass CLI(object):\n <mask token>\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\n 'CMU Deep Learning Utilities')\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\n 'CMU Deep Learning Utilities')\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\nCLI()\n",
"step-4": "import argparse\nimport sys\nimport os\nimport cmudl.hw2p2 as hw2p2\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=\n 'CMU Deep Learning Utilities')\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\n\nCLI()\n",
"step-5": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os\nimport cmudl.hw2p2 as hw2p2\n\nclass CLI(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(\n description='CMU Deep Learning Utilities',\n )\n parser.add_argument('command', help='Subcommand to run')\n # parse_args defaults to [1:] for args, but you need to\n # exclude the rest of the args too, or validation will fail\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n # use dispatch pattern to invoke method with same name\n getattr(self, args.command)()\n\n def hw2p2(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', type=str, default=None)\n args = parser.parse_args(sys.argv[2:])\n hw2p2.submit(args.s)\n\nCLI()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class MyTestCase(unittest.TestCase):
<|reserved_special_token_0|>
def test_2(self):
a = t(20)
b = t(1)
a.left = b
c = t(40)
a.right = c
d = t(35)
c.left = d
self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyTestCase(unittest.TestCase):
def test_1(self):
a = t(2)
b = t(1)
a.left = b
self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])
def test_2(self):
a = t(20)
b = t(1)
a.left = b
c = t(40)
a.right = c
d = t(35)
c.left = d
self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
__author__ = 'lei'
<|reserved_special_token_0|>
class MyTestCase(unittest.TestCase):
def test_1(self):
a = t(2)
b = t(1)
a.left = b
self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])
def test_2(self):
a = t(20)
b = t(1)
a.left = b
c = t(40)
a.right = c
d = t(35)
c.left = d
self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
__author__ = 'lei'
import unittest
from ch3.node import TreeNode as t
import ch3.searchRange as sr
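# searchRange(root, k1, k2) should return the BST values within [k1, k2], in ascending order.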
class MyTestCase(unittest.TestCase):
def test_1(self):
a = t(2)
b = t(1)
a.left = b
self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])
def test_2(self):
a = t(20)
b = t(1)
a.left = b
c = t(40)
a.right = c
d = t(35)
c.left = d
self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "c63e5a2178e82ec6e0e1e91a81145afb735bf7bf",
"index": 216,
"step-1": "<mask token>\n\n\nclass MyTestCase(unittest.TestCase):\n <mask token>\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_1(self):\n a = t(2)\n b = t(1)\n a.left = b\n self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "__author__ = 'lei'\n<mask token>\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_1(self):\n a = t(2)\n b = t(1)\n a.left = b\n self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "__author__ = 'lei'\nimport unittest\nfrom ch3.node import TreeNode as t\nimport ch3.searchRange as sr\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_1(self):\n a = t(2)\n b = t(1)\n a.left = b\n self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
class NewState(ObstacleState):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NewState(ObstacleState):
<|reserved_special_token_0|>
def hit(self) ->None:
"""
Just remove hit points of obstacle and change state
"""
self._obstacle.hit_points -= 1
self._obstacle.state = HittedState(self._obstacle)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NewState(ObstacleState):
@property
def delete(self) ->bool:
"""
Don't delete this obstacle
:return: False
"""
return False
def hit(self) ->None:
"""
Just remove hit points of obstacle and change state
"""
self._obstacle.hit_points -= 1
self._obstacle.state = HittedState(self._obstacle)
<|reserved_special_token_1|>
from angrytux.model.game_objects.obstacle_states.HittedState import HittedState
from angrytux.model.game_objects.obstacle_states.ObstacleState import ObstacleState
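# Initial obstacle state: never flagged for deletion; a hit costs one hit point and moves to HittedState.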
class NewState(ObstacleState):
@property
def delete(self) ->bool:
"""
Don't delete this obstacle
:return: False
"""
return False
def hit(self) ->None:
"""
Just remove hit points of obstacle and change state
"""
self._obstacle.hit_points -= 1
self._obstacle.state = HittedState(self._obstacle)
|
flexible
|
{
"blob_id": "7d21e76383b80e8a4433fb11cb3b64efee7a6d3b",
"index": 7008,
"step-1": "<mask token>\n\n\nclass NewState(ObstacleState):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NewState(ObstacleState):\n <mask token>\n\n def hit(self) ->None:\n \"\"\"\n Just remove hit points of obstacle and change state\n \"\"\"\n self._obstacle.hit_points -= 1\n self._obstacle.state = HittedState(self._obstacle)\n",
"step-3": "<mask token>\n\n\nclass NewState(ObstacleState):\n\n @property\n def delete(self) ->bool:\n \"\"\"\n Don't delete this obstacle\n :return: False\n \"\"\"\n return False\n\n def hit(self) ->None:\n \"\"\"\n Just remove hit points of obstacle and change state\n \"\"\"\n self._obstacle.hit_points -= 1\n self._obstacle.state = HittedState(self._obstacle)\n",
"step-4": "from angrytux.model.game_objects.obstacle_states.HittedState import HittedState\nfrom angrytux.model.game_objects.obstacle_states.ObstacleState import ObstacleState\n\n\nclass NewState(ObstacleState):\n\n @property\n def delete(self) ->bool:\n \"\"\"\n Don't delete this obstacle\n :return: False\n \"\"\"\n return False\n\n def hit(self) ->None:\n \"\"\"\n Just remove hit points of obstacle and change state\n \"\"\"\n self._obstacle.hit_points -= 1\n self._obstacle.state = HittedState(self._obstacle)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def tree_state(x):
if x <= 19:
state = 'alive'
return state
elif x <= 49:
rand = random.randrange(tree_age, 51, 1)
if rand == 50:
state = 'dead'
else:
state = 'alive'
return state
else:
state = 'dead'
return state
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tree_state(x):
if x <= 19:
state = 'alive'
return state
elif x <= 49:
rand = random.randrange(tree_age, 51, 1)
if rand == 50:
state = 'dead'
else:
state = 'alive'
return state
else:
state = 'dead'
return state
print('Welcome to your tree garden!')
while value == 1:
print(age_display)
print(state_display)
    print('Please press 1 to increase its age or 2 to quit.')
action = input('Select 1/2 ')
if action == '2':
value = 2
elif action == '1':
tree_age += 1
        state = tree_state(tree_age)
print(state)
if state == 'dead':
print('Sorry your tree is dead.')
quit()
else:
            age_display = 'Your tree has an age of: {}'.format(tree_age)
else:
print('Invalid input, please enter the right input.')
if value == 2:
print('Thanks')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tree_age = 1
state = 'alive'
value = 1
age_display = 'Your tree has an age of: {}'.format(tree_age)
state_display = 'Your tree is {}.'.format(state)
def tree_state(x):
if x <= 19:
state = 'alive'
return state
elif x <= 49:
rand = random.randrange(tree_age, 51, 1)
if rand == 50:
state = 'dead'
else:
state = 'alive'
return state
else:
state = 'dead'
return state
print('Welcome to your tree garden!')
while value == 1:
print(age_display)
print(state_display)
    print('Please press 1 to increase its age or 2 to quit.')
action = input('Select 1/2 ')
if action == '2':
value = 2
elif action == '1':
tree_age += 1
        state = tree_state(tree_age)
print(state)
if state == 'dead':
print('Sorry your tree is dead.')
quit()
else:
            age_display = 'Your tree has an age of: {}'.format(tree_age)
else:
print('Invalid input, please enter the right input.')
if value == 2:
print('Thanks')
<|reserved_special_token_1|>
import random
tree_age = 1
state = 'alive'
value = 1
age_display = 'Your tree has an age of: {}'.format(tree_age)
state_display = 'Your tree is {}.'.format(state)
def tree_state(x):
if x <= 19:
state = 'alive'
return state
elif x <= 49:
rand = random.randrange(tree_age, 51, 1)
if rand == 50:
state = 'dead'
else:
state = 'alive'
return state
else:
state = 'dead'
return state
print('Welcome to your tree garden!')
while value == 1:
print(age_display)
print(state_display)
    print('Please press 1 to increase its age or 2 to quit.')
action = input('Select 1/2 ')
if action == '2':
value = 2
elif action == '1':
tree_age += 1
        state = tree_state(tree_age)
print(state)
if state == 'dead':
print('Sorry your tree is dead.')
quit()
else:
            age_display = 'Your tree has an age of: {}'.format(tree_age)
else:
print('Invalid input, please enter the right input.')
if value == 2:
print('Thanks')
<|reserved_special_token_1|>
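# Tree-garden game: each turn ages the tree by one year; from age 20 it may randomly die, and at age 50 it always does.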
import random
tree_age = 1
state = "alive"
value = 1
age_display = "Your tree have an age of: {}".format(tree_age)
state_display = "Your tree is {}.".format(state)
def tree_state(x):
if x <= 19:
state = "alive"
return state
elif x <= 49:
rand = random.randrange(tree_age, 51, 1)
if rand == 50:
state = "dead"
else:
state = "alive"
return state
else:
state = "dead"
return state
print("Welcome to your tree garden!")
while value == 1 :
print(age_display)
print(state_display)
print("Please press 1 to increase is age or 2 to quit.")
action = input("Select 1/2 ")
if action == "2" :
value = 2
elif action == "1" :
tree_age += 1
        # assign the return value so the tree's state actually updates
        state = tree_state(tree_age)
print(state)
if state == "dead":
print("Sorry your tree is dead.")
quit()
else:
age_display = "Your tree have an age of: {}".format(tree_age)
else:
print("Invalid input, please enter the right input.")
if value == 2:
print("Thanks")
|
flexible
|
{
"blob_id": "763f552329a0d38900e08081a1017b33cd882868",
"index": 9391,
"step-1": "<mask token>\n\n\ndef tree_state(x):\n if x <= 19:\n state = 'alive'\n return state\n elif x <= 49:\n rand = random.randrange(tree_age, 51, 1)\n if rand == 50:\n state = 'dead'\n else:\n state = 'alive'\n return state\n else:\n state = 'dead'\n return state\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tree_state(x):\n if x <= 19:\n state = 'alive'\n return state\n elif x <= 49:\n rand = random.randrange(tree_age, 51, 1)\n if rand == 50:\n state = 'dead'\n else:\n state = 'alive'\n return state\n else:\n state = 'dead'\n return state\n\n\nprint('Welcome to your tree garden!')\nwhile value == 1:\n print(age_display)\n print(state_display)\n print('Please press 1 to increase is age or 2 to quit.')\n action = input('Select 1/2 ')\n if action == '2':\n value = 2\n elif action == '1':\n tree_age += 1\n tree_state(tree_age)\n print(state)\n if state == 'dead':\n print('Sorry your tree is dead.')\n quit()\n else:\n age_display = 'Your tree have an age of: {}'.format(tree_age)\n else:\n print('Invalid input, please enter the right input.')\nif value == 2:\n print('Thanks')\n",
"step-3": "<mask token>\ntree_age = 1\nstate = 'alive'\nvalue = 1\nage_display = 'Your tree have an age of: {}'.format(tree_age)\nstate_display = 'Your tree is {}.'.format(state)\n\n\ndef tree_state(x):\n if x <= 19:\n state = 'alive'\n return state\n elif x <= 49:\n rand = random.randrange(tree_age, 51, 1)\n if rand == 50:\n state = 'dead'\n else:\n state = 'alive'\n return state\n else:\n state = 'dead'\n return state\n\n\nprint('Welcome to your tree garden!')\nwhile value == 1:\n print(age_display)\n print(state_display)\n print('Please press 1 to increase is age or 2 to quit.')\n action = input('Select 1/2 ')\n if action == '2':\n value = 2\n elif action == '1':\n tree_age += 1\n tree_state(tree_age)\n print(state)\n if state == 'dead':\n print('Sorry your tree is dead.')\n quit()\n else:\n age_display = 'Your tree have an age of: {}'.format(tree_age)\n else:\n print('Invalid input, please enter the right input.')\nif value == 2:\n print('Thanks')\n",
"step-4": "import random\ntree_age = 1\nstate = 'alive'\nvalue = 1\nage_display = 'Your tree have an age of: {}'.format(tree_age)\nstate_display = 'Your tree is {}.'.format(state)\n\n\ndef tree_state(x):\n if x <= 19:\n state = 'alive'\n return state\n elif x <= 49:\n rand = random.randrange(tree_age, 51, 1)\n if rand == 50:\n state = 'dead'\n else:\n state = 'alive'\n return state\n else:\n state = 'dead'\n return state\n\n\nprint('Welcome to your tree garden!')\nwhile value == 1:\n print(age_display)\n print(state_display)\n print('Please press 1 to increase is age or 2 to quit.')\n action = input('Select 1/2 ')\n if action == '2':\n value = 2\n elif action == '1':\n tree_age += 1\n tree_state(tree_age)\n print(state)\n if state == 'dead':\n print('Sorry your tree is dead.')\n quit()\n else:\n age_display = 'Your tree have an age of: {}'.format(tree_age)\n else:\n print('Invalid input, please enter the right input.')\nif value == 2:\n print('Thanks')\n",
"step-5": "import random\r\n\r\ntree_age = 1\r\n\r\nstate = \"alive\"\r\n\r\nvalue = 1\r\n\r\nage_display = \"Your tree have an age of: {}\".format(tree_age)\r\nstate_display = \"Your tree is {}.\".format(state)\r\n\r\ndef tree_state(x):\r\n if x <= 19:\r\n state = \"alive\"\r\n return state\r\n elif x <= 49:\r\n rand = random.randrange(tree_age, 51, 1)\r\n if rand == 50:\r\n state = \"dead\"\r\n else:\r\n state = \"alive\"\r\n return state\r\n else:\r\n state = \"dead\"\r\n return state\r\n \r\nprint(\"Welcome to your tree garden!\")\r\n\r\nwhile value == 1 :\r\n \r\n print(age_display)\r\n print(state_display)\r\n print(\"Please press 1 to increase is age or 2 to quit.\")\r\n action = input(\"Select 1/2 \")\r\n\r\n if action == \"2\" :\r\n value = 2\r\n\r\n elif action == \"1\" :\r\n tree_age += 1\r\n #la fonction tree_state ne se lance pas je crois\r\n tree_state(tree_age)\r\n print(state)\r\n if state == \"dead\":\r\n print(\"Sorry your tree is dead.\")\r\n quit()\r\n else:\r\n age_display = \"Your tree have an age of: {}\".format(tree_age)\r\n\r\n else:\r\n print(\"Invalid input, please enter the right input.\")\r\n\r\nif value == 2:\r\n print(\"Thanks\")\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f'copying from {from_file} to {to_file}')
<|reserved_special_token_0|>
print(f'the input file is {len(indata)} bytes long')
print(f'does the output file exist? {exists(to_file)}')
print('return to continue, CTRL-C to abort')
input('?')
open(to_file, 'w').write(indata)
print('done!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from_file = input('from_file')
to_file = input('to_file')
print(f'copying from {from_file} to {to_file}')
indata = open(from_file).read()
print(f'the input file is {len(indata)} bytes long')
print(f'does the output file exist? {exists(to_file)}')
print('return to continue, CTRL-C to abort')
input('?')
open(to_file, 'w').write(indata)
print('done!')
<|reserved_special_token_1|>
from os.path import exists
from_file = input('from_file')
to_file = input('to_file')
print(f'copying from {from_file} to {to_file}')
indata = open(from_file).read()
print(f'the input file is {len(indata)} bytes long')
print(f'does the output file exist? {exists(to_file)}')
print('return to continue, CTRL-C to abort')
input('?')
open(to_file, 'w').write(indata)
print('done!')
<|reserved_special_token_1|>
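# Copy one file to another: report the source size, note whether the target exists, and wait for confirmation.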
from os.path import exists
from_file = input('from_file')
to_file = input('to_file')
print(f"copying from {from_file} to {to_file}")
indata = open(from_file).read()  # no need to close the file explicitly when reading this way
print(f"the input file is {len(indata)} bytes long")
print(f"does the output file exist? {exists(to_file)}")
print("return to continue, CTRL-C to abort")
input('?')
open(to_file,'w').write(indata)  # no explicit close needed
print("done!")
|
flexible
|
{
"blob_id": "4f0933c58aa1d41faf4f949d9684c04f9e01b473",
"index": 36,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'copying from {from_file} to {to_file}')\n<mask token>\nprint(f'the input file is {len(indata)} bytes long')\nprint(f'does the output file exist? {exists(to_file)}')\nprint('return to continue, CTRL-C to abort')\ninput('?')\nopen(to_file, 'w').write(indata)\nprint('done!')\n",
"step-3": "<mask token>\nfrom_file = input('form_file')\nto_file = input('to_file')\nprint(f'copying from {from_file} to {to_file}')\nindata = open(from_file).read()\nprint(f'the input file is {len(indata)} bytes long')\nprint(f'does the output file exist? {exists(to_file)}')\nprint('return to continue, CTRL-C to abort')\ninput('?')\nopen(to_file, 'w').write(indata)\nprint('done!')\n",
"step-4": "from os.path import exists\nfrom_file = input('form_file')\nto_file = input('to_file')\nprint(f'copying from {from_file} to {to_file}')\nindata = open(from_file).read()\nprint(f'the input file is {len(indata)} bytes long')\nprint(f'does the output file exist? {exists(to_file)}')\nprint('return to continue, CTRL-C to abort')\ninput('?')\nopen(to_file, 'w').write(indata)\nprint('done!')\n",
"step-5": "from os.path import exists\n\nfrom_file = input('form_file')\nto_file = input('to_file')\n\nprint(f\"copying from {from_file} to {to_file}\")\nindata = open(from_file).read()#这种方式读取文件后无需close\nprint(f\"the input file is {len(indata)} bytes long\")\n\nprint(f\"does the output file exist? {exists(to_file)}\")\nprint(\"return to continue, CTRL-C to abort\")\ninput('?')\n\nopen(to_file,'w').write(indata)#无需close\n\nprint(\"done!\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# Core Library modules
import os
# Third party modules
import nose
# First party modules
import lumixmaptool.copy as copy
# Tests
def get_parser_test():
"""Check if the evaluation model returns a parser object."""
copy.get_parser()
def parse_mapdata_test():
current_folder = os.path.dirname(os.path.realpath(__file__))
misc_folder = os.path.join(current_folder, "misc")
maplistdata_path = os.path.join(misc_folder, "MapList.dat")
result = copy.parse_mapdata(maplistdata_path)
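    # Expected parse of misc/MapList.dat: two header numbers plus a region-number -> DFT tile path mapping.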
expected = {
"num1": "00010001",
"num2": "00010001",
"regions": {
1: [
"BACK/B0000035.DFT",
"BACK/B0000036.DFT",
"BACK/B0000044.DFT",
"BACK/B0000045.DFT",
"BACK/B0000053.DFT",
"BACK/B0000054.DFT",
"NAME/N0000035.DFT",
"NAME/N0000036.DFT",
"NAME/N0000044.DFT",
"NAME/N0000045.DFT",
"NAME/N0000053.DFT",
"NAME/N0000054.DFT",
"POI/P0000035.DFT",
"POI/P0000036.DFT",
"POI/P0000044.DFT",
"POI/P0000045.DFT",
"POI/P0000053.DFT",
"POI/P0000054.DFT",
],
2: [
"BACK/B0000024.DFT",
"BACK/B0000025.DFT",
"BACK/B0000026.DFT",
"BACK/B0000027.DFT",
"BACK/B0000033.DFT",
"BACK/B0000034.DFT",
"BACK/B0000035.DFT",
"BACK/B0000036.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"BACK/B0000044.DFT",
"BACK/B0000045.DFT",
"NAME/N0000024.DFT",
"NAME/N0000025.DFT",
"NAME/N0000026.DFT",
"NAME/N0000027.DFT",
"NAME/N0000033.DFT",
"NAME/N0000034.DFT",
"NAME/N0000035.DFT",
"NAME/N0000036.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"NAME/N0000044.DFT",
"NAME/N0000045.DFT",
"POI/P0000024.DFT",
"POI/P0000025.DFT",
"POI/P0000026.DFT",
"POI/P0000027.DFT",
"POI/P0000033.DFT",
"POI/P0000034.DFT",
"POI/P0000035.DFT",
"POI/P0000036.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
"POI/P0000044.DFT",
"POI/P0000045.DFT",
],
3: [
"BACK/B0000001.DFT",
"BACK/B0000008.DFT",
"BACK/B0000009.DFT",
"BACK/B0000010.DFT",
"BACK/B0000017.DFT",
"BACK/B0000018.DFT",
"BACK/B0000019.DFT",
"BACK/B0000026.DFT",
"BACK/B0000027.DFT",
"NAME/N0000001.DFT",
"NAME/N0000008.DFT",
"NAME/N0000009.DFT",
"NAME/N0000010.DFT",
"NAME/N0000017.DFT",
"NAME/N0000018.DFT",
"NAME/N0000019.DFT",
"NAME/N0000026.DFT",
"NAME/N0000027.DFT",
"POI/P0000017.DFT",
"POI/P0000018.DFT",
"POI/P0000019.DFT",
"POI/P0000026.DFT",
"POI/P0000027.DFT",
],
4: [
"BACK/B0000019.DFT",
"BACK/B0000020.DFT",
"BACK/B0000021.DFT",
"BACK/B0000022.DFT",
"BACK/B0000027.DFT",
"BACK/B0000028.DFT",
"BACK/B0000029.DFT",
"BACK/B0000030.DFT",
"BACK/B0000031.DFT",
"BACK/B0000036.DFT",
"BACK/B0000037.DFT",
"BACK/B0000038.DFT",
"BACK/B0000039.DFT",
"BACK/B0000040.DFT",
"BACK/B0000045.DFT",
"BACK/B0000046.DFT",
"BACK/B0000047.DFT",
"BACK/B0000048.DFT",
"BACK/B0000049.DFT",
"BACK/B0000054.DFT",
"NAME/N0000019.DFT",
"NAME/N0000020.DFT",
"NAME/N0000021.DFT",
"NAME/N0000022.DFT",
"NAME/N0000027.DFT",
"NAME/N0000028.DFT",
"NAME/N0000029.DFT",
"NAME/N0000030.DFT",
"NAME/N0000031.DFT",
"NAME/N0000036.DFT",
"NAME/N0000037.DFT",
"NAME/N0000038.DFT",
"NAME/N0000039.DFT",
"NAME/N0000040.DFT",
"NAME/N0000045.DFT",
"NAME/N0000046.DFT",
"NAME/N0000047.DFT",
"NAME/N0000048.DFT",
"NAME/N0000049.DFT",
"NAME/N0000054.DFT",
"POI/P0000019.DFT",
"POI/P0000020.DFT",
"POI/P0000021.DFT",
"POI/P0000022.DFT",
"POI/P0000027.DFT",
"POI/P0000028.DFT",
"POI/P0000029.DFT",
"POI/P0000030.DFT",
"POI/P0000031.DFT",
"POI/P0000036.DFT",
"POI/P0000037.DFT",
"POI/P0000038.DFT",
"POI/P0000039.DFT",
"POI/P0000040.DFT",
"POI/P0000045.DFT",
"POI/P0000046.DFT",
"POI/P0000047.DFT",
"POI/P0000048.DFT",
"POI/P0000049.DFT",
"POI/P0000054.DFT",
],
5: [
"BACK/B0000002.DFT",
"BACK/B0000003.DFT",
"BACK/B0000004.DFT",
"BACK/B0000011.DFT",
"BACK/B0000012.DFT",
"BACK/B0000013.DFT",
"BACK/B0000020.DFT",
"BACK/B0000021.DFT",
"BACK/B0000022.DFT",
"BACK/B0000029.DFT",
"BACK/B0000030.DFT",
"BACK/B0000031.DFT",
"NAME/N0000002.DFT",
"NAME/N0000003.DFT",
"NAME/N0000004.DFT",
"NAME/N0000011.DFT",
"NAME/N0000012.DFT",
"NAME/N0000013.DFT",
"NAME/N0000020.DFT",
"NAME/N0000021.DFT",
"NAME/N0000022.DFT",
"NAME/N0000029.DFT",
"NAME/N0000030.DFT",
"NAME/N0000031.DFT",
"POI/P0000003.DFT",
"POI/P0000011.DFT",
"POI/P0000012.DFT",
"POI/P0000013.DFT",
"POI/P0000020.DFT",
"POI/P0000021.DFT",
"POI/P0000022.DFT",
"POI/P0000029.DFT",
"POI/P0000030.DFT",
"POI/P0000031.DFT",
],
6: [
"BACK/B0000040.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000049.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"NAME/N0000040.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000049.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"POI/P0000040.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000049.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
],
7: [
"BACK/B0000032.DFT",
"BACK/B0000033.DFT",
"BACK/B0000034.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"BACK/B0000052.DFT",
"NAME/N0000032.DFT",
"NAME/N0000033.DFT",
"NAME/N0000034.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"NAME/N0000052.DFT",
"POI/P0000032.DFT",
"POI/P0000033.DFT",
"POI/P0000034.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
"POI/P0000052.DFT",
],
8: [
"BACK/B0000031.DFT",
"BACK/B0000032.DFT",
"BACK/B0000033.DFT",
"BACK/B0000040.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000049.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"NAME/N0000031.DFT",
"NAME/N0000032.DFT",
"NAME/N0000033.DFT",
"NAME/N0000040.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000049.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"POI/P0000031.DFT",
"POI/P0000032.DFT",
"POI/P0000033.DFT",
"POI/P0000040.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000049.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
],
9: [
"BACK/B0000005.DFT",
"BACK/B0000006.DFT",
"BACK/B0000007.DFT",
"BACK/B0000014.DFT",
"BACK/B0000015.DFT",
"BACK/B0000016.DFT",
"BACK/B0000023.DFT",
"BACK/B0000024.DFT",
"BACK/B0000025.DFT",
"BACK/B0000032.DFT",
"BACK/B0000033.DFT",
"BACK/B0000034.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"NAME/N0000005.DFT",
"NAME/N0000006.DFT",
"NAME/N0000007.DFT",
"NAME/N0000014.DFT",
"NAME/N0000015.DFT",
"NAME/N0000016.DFT",
"NAME/N0000023.DFT",
"NAME/N0000024.DFT",
"NAME/N0000025.DFT",
"NAME/N0000032.DFT",
"NAME/N0000033.DFT",
"NAME/N0000034.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"POI/P0000014.DFT",
"POI/P0000015.DFT",
"POI/P0000023.DFT",
"POI/P0000024.DFT",
"POI/P0000025.DFT",
"POI/P0000032.DFT",
"POI/P0000033.DFT",
"POI/P0000034.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
],
10: [
"BACK/B0000037.DFT",
"BACK/B0000041.DFT",
"BACK/B0000042.DFT",
"BACK/B0000043.DFT",
"BACK/B0000044.DFT",
"BACK/B0000045.DFT",
"BACK/B0000046.DFT",
"BACK/B0000050.DFT",
"BACK/B0000051.DFT",
"BACK/B0000052.DFT",
"BACK/B0000053.DFT",
"BACK/B0000054.DFT",
"NAME/N0000037.DFT",
"NAME/N0000041.DFT",
"NAME/N0000042.DFT",
"NAME/N0000043.DFT",
"NAME/N0000044.DFT",
"NAME/N0000045.DFT",
"NAME/N0000046.DFT",
"NAME/N0000050.DFT",
"NAME/N0000051.DFT",
"NAME/N0000052.DFT",
"NAME/N0000053.DFT",
"NAME/N0000054.DFT",
"POI/P0000037.DFT",
"POI/P0000041.DFT",
"POI/P0000042.DFT",
"POI/P0000043.DFT",
"POI/P0000044.DFT",
"POI/P0000045.DFT",
"POI/P0000046.DFT",
"POI/P0000050.DFT",
"POI/P0000051.DFT",
"POI/P0000052.DFT",
"POI/P0000053.DFT",
"POI/P0000054.DFT",
],
},
}
nose.tools.assert_equal(result, expected)
|
normal
|
{
"blob_id": "4dfdbc692858a627248cbe47d19b43c2a27ec70e",
"index": 7373,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 
'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 
'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n",
"step-3": "<mask token>\n\n\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 
'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 
'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n",
"step-4": "import os\nimport nose\nimport lumixmaptool.copy as copy\n\n\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, 'misc')\n maplistdata_path = os.path.join(misc_folder, 'MapList.dat')\n result = copy.parse_mapdata(maplistdata_path)\n expected = {'num1': '00010001', 'num2': '00010001', 'regions': {(1): [\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000044.DFT',\n 'BACK/B0000045.DFT', 'BACK/B0000053.DFT', 'BACK/B0000054.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000044.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000053.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000044.DFT',\n 'POI/P0000045.DFT', 'POI/P0000053.DFT', 'POI/P0000054.DFT'], (2): [\n 'BACK/B0000024.DFT', 'BACK/B0000025.DFT', 'BACK/B0000026.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000035.DFT', 'BACK/B0000036.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000043.DFT', 'BACK/B0000044.DFT', 'BACK/B0000045.DFT',\n 'NAME/N0000024.DFT', 'NAME/N0000025.DFT', 'NAME/N0000026.DFT',\n 'NAME/N0000027.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000035.DFT', 'NAME/N0000036.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000043.DFT', 'NAME/N0000044.DFT', 'NAME/N0000045.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000026.DFT',\n 'POI/P0000027.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000035.DFT', 'POI/P0000036.DFT', 'POI/P0000042.DFT',\n 'POI/P0000043.DFT', 'POI/P0000044.DFT', 'POI/P0000045.DFT'], (3): [\n 'BACK/B0000001.DFT', 'BACK/B0000008.DFT', 'BACK/B0000009.DFT',\n 'BACK/B0000010.DFT', 'BACK/B0000017.DFT', 'BACK/B0000018.DFT',\n 'BACK/B0000019.DFT', 'BACK/B0000026.DFT', 'BACK/B0000027.DFT',\n 'NAME/N0000001.DFT', 'NAME/N0000008.DFT', 'NAME/N0000009.DFT',\n 'NAME/N0000010.DFT', 'NAME/N0000017.DFT', 'NAME/N0000018.DFT',\n 'NAME/N0000019.DFT', 'NAME/N0000026.DFT', 'NAME/N0000027.DFT',\n 'POI/P0000017.DFT', 'POI/P0000018.DFT', 'POI/P0000019.DFT',\n 'POI/P0000026.DFT', 'POI/P0000027.DFT'], (4): ['BACK/B0000019.DFT',\n 'BACK/B0000020.DFT', 'BACK/B0000021.DFT', 'BACK/B0000022.DFT',\n 'BACK/B0000027.DFT', 'BACK/B0000028.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'BACK/B0000036.DFT',\n 'BACK/B0000037.DFT', 'BACK/B0000038.DFT', 'BACK/B0000039.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000047.DFT', 'BACK/B0000048.DFT', 'BACK/B0000049.DFT',\n 'BACK/B0000054.DFT', 'NAME/N0000019.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000027.DFT',\n 'NAME/N0000028.DFT', 'NAME/N0000029.DFT', 'NAME/N0000030.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000036.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000038.DFT', 'NAME/N0000039.DFT', 'NAME/N0000040.DFT',\n 'NAME/N0000045.DFT', 'NAME/N0000046.DFT', 'NAME/N0000047.DFT',\n 'NAME/N0000048.DFT', 'NAME/N0000049.DFT', 'NAME/N0000054.DFT',\n 'POI/P0000019.DFT', 'POI/P0000020.DFT', 'POI/P0000021.DFT',\n 'POI/P0000022.DFT', 'POI/P0000027.DFT', 'POI/P0000028.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT',\n 'POI/P0000036.DFT', 'POI/P0000037.DFT', 'POI/P0000038.DFT',\n 'POI/P0000039.DFT', 'POI/P0000040.DFT', 'POI/P0000045.DFT',\n 'POI/P0000046.DFT', 'POI/P0000047.DFT', 'POI/P0000048.DFT',\n 'POI/P0000049.DFT', 'POI/P0000054.DFT'], (5): ['BACK/B0000002.DFT',\n 'BACK/B0000003.DFT', 'BACK/B0000004.DFT', 
'BACK/B0000011.DFT',\n 'BACK/B0000012.DFT', 'BACK/B0000013.DFT', 'BACK/B0000020.DFT',\n 'BACK/B0000021.DFT', 'BACK/B0000022.DFT', 'BACK/B0000029.DFT',\n 'BACK/B0000030.DFT', 'BACK/B0000031.DFT', 'NAME/N0000002.DFT',\n 'NAME/N0000003.DFT', 'NAME/N0000004.DFT', 'NAME/N0000011.DFT',\n 'NAME/N0000012.DFT', 'NAME/N0000013.DFT', 'NAME/N0000020.DFT',\n 'NAME/N0000021.DFT', 'NAME/N0000022.DFT', 'NAME/N0000029.DFT',\n 'NAME/N0000030.DFT', 'NAME/N0000031.DFT', 'POI/P0000003.DFT',\n 'POI/P0000011.DFT', 'POI/P0000012.DFT', 'POI/P0000013.DFT',\n 'POI/P0000020.DFT', 'POI/P0000021.DFT', 'POI/P0000022.DFT',\n 'POI/P0000029.DFT', 'POI/P0000030.DFT', 'POI/P0000031.DFT'], (6): [\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (7): [\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000050.DFT', 'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'POI/P0000032.DFT', 'POI/P0000033.DFT', 'POI/P0000034.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT'], (8): [\n 'BACK/B0000031.DFT', 'BACK/B0000032.DFT', 'BACK/B0000033.DFT',\n 'BACK/B0000040.DFT', 'BACK/B0000041.DFT', 'BACK/B0000042.DFT',\n 'BACK/B0000049.DFT', 'BACK/B0000050.DFT', 'BACK/B0000051.DFT',\n 'NAME/N0000031.DFT', 'NAME/N0000032.DFT', 'NAME/N0000033.DFT',\n 'NAME/N0000040.DFT', 'NAME/N0000041.DFT', 'NAME/N0000042.DFT',\n 'NAME/N0000049.DFT', 'NAME/N0000050.DFT', 'NAME/N0000051.DFT',\n 'POI/P0000031.DFT', 'POI/P0000032.DFT', 'POI/P0000033.DFT',\n 'POI/P0000040.DFT', 'POI/P0000041.DFT', 'POI/P0000042.DFT',\n 'POI/P0000049.DFT', 'POI/P0000050.DFT', 'POI/P0000051.DFT'], (9): [\n 'BACK/B0000005.DFT', 'BACK/B0000006.DFT', 'BACK/B0000007.DFT',\n 'BACK/B0000014.DFT', 'BACK/B0000015.DFT', 'BACK/B0000016.DFT',\n 'BACK/B0000023.DFT', 'BACK/B0000024.DFT', 'BACK/B0000025.DFT',\n 'BACK/B0000032.DFT', 'BACK/B0000033.DFT', 'BACK/B0000034.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'NAME/N0000005.DFT', 'NAME/N0000006.DFT', 'NAME/N0000007.DFT',\n 'NAME/N0000014.DFT', 'NAME/N0000015.DFT', 'NAME/N0000016.DFT',\n 'NAME/N0000023.DFT', 'NAME/N0000024.DFT', 'NAME/N0000025.DFT',\n 'NAME/N0000032.DFT', 'NAME/N0000033.DFT', 'NAME/N0000034.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'POI/P0000014.DFT', 'POI/P0000015.DFT', 'POI/P0000023.DFT',\n 'POI/P0000024.DFT', 'POI/P0000025.DFT', 'POI/P0000032.DFT',\n 'POI/P0000033.DFT', 'POI/P0000034.DFT', 'POI/P0000041.DFT',\n 'POI/P0000042.DFT', 'POI/P0000043.DFT'], (10): ['BACK/B0000037.DFT',\n 'BACK/B0000041.DFT', 'BACK/B0000042.DFT', 'BACK/B0000043.DFT',\n 'BACK/B0000044.DFT', 'BACK/B0000045.DFT', 'BACK/B0000046.DFT',\n 'BACK/B0000050.DFT', 'BACK/B0000051.DFT', 'BACK/B0000052.DFT',\n 'BACK/B0000053.DFT', 'BACK/B0000054.DFT', 'NAME/N0000037.DFT',\n 'NAME/N0000041.DFT', 'NAME/N0000042.DFT', 'NAME/N0000043.DFT',\n 'NAME/N0000044.DFT', 'NAME/N0000045.DFT', 'NAME/N0000046.DFT',\n 'NAME/N0000050.DFT', 
'NAME/N0000051.DFT', 'NAME/N0000052.DFT',\n 'NAME/N0000053.DFT', 'NAME/N0000054.DFT', 'POI/P0000037.DFT',\n 'POI/P0000041.DFT', 'POI/P0000042.DFT', 'POI/P0000043.DFT',\n 'POI/P0000044.DFT', 'POI/P0000045.DFT', 'POI/P0000046.DFT',\n 'POI/P0000050.DFT', 'POI/P0000051.DFT', 'POI/P0000052.DFT',\n 'POI/P0000053.DFT', 'POI/P0000054.DFT']}}\n nose.tools.assert_equal(result, expected)\n",
"step-5": "#!/usr/bin/env python\n\n# Core Library modules\nimport os\n\n# Third party modules\nimport nose\n\n# First party modules\nimport lumixmaptool.copy as copy\n\n\n# Tests\ndef get_parser_test():\n \"\"\"Check if the evaluation model returns a parser object.\"\"\"\n copy.get_parser()\n\n\ndef parse_mapdata_test():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n maplistdata_path = os.path.join(misc_folder, \"MapList.dat\")\n result = copy.parse_mapdata(maplistdata_path)\n expected = {\n \"num1\": \"00010001\",\n \"num2\": \"00010001\",\n \"regions\": {\n 1: [\n \"BACK/B0000035.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000053.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000035.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000053.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000035.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000053.DFT\",\n \"POI/P0000054.DFT\",\n ],\n 2: [\n \"BACK/B0000024.DFT\",\n \"BACK/B0000025.DFT\",\n \"BACK/B0000026.DFT\",\n \"BACK/B0000027.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000035.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"NAME/N0000024.DFT\",\n \"NAME/N0000025.DFT\",\n \"NAME/N0000026.DFT\",\n \"NAME/N0000027.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000035.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"POI/P0000024.DFT\",\n \"POI/P0000025.DFT\",\n \"POI/P0000026.DFT\",\n \"POI/P0000027.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000035.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n ],\n 3: [\n \"BACK/B0000001.DFT\",\n \"BACK/B0000008.DFT\",\n \"BACK/B0000009.DFT\",\n \"BACK/B0000010.DFT\",\n \"BACK/B0000017.DFT\",\n \"BACK/B0000018.DFT\",\n \"BACK/B0000019.DFT\",\n \"BACK/B0000026.DFT\",\n \"BACK/B0000027.DFT\",\n \"NAME/N0000001.DFT\",\n \"NAME/N0000008.DFT\",\n \"NAME/N0000009.DFT\",\n \"NAME/N0000010.DFT\",\n \"NAME/N0000017.DFT\",\n \"NAME/N0000018.DFT\",\n \"NAME/N0000019.DFT\",\n \"NAME/N0000026.DFT\",\n \"NAME/N0000027.DFT\",\n \"POI/P0000017.DFT\",\n \"POI/P0000018.DFT\",\n \"POI/P0000019.DFT\",\n \"POI/P0000026.DFT\",\n \"POI/P0000027.DFT\",\n ],\n 4: [\n \"BACK/B0000019.DFT\",\n \"BACK/B0000020.DFT\",\n \"BACK/B0000021.DFT\",\n \"BACK/B0000022.DFT\",\n \"BACK/B0000027.DFT\",\n \"BACK/B0000028.DFT\",\n \"BACK/B0000029.DFT\",\n \"BACK/B0000030.DFT\",\n \"BACK/B0000031.DFT\",\n \"BACK/B0000036.DFT\",\n \"BACK/B0000037.DFT\",\n \"BACK/B0000038.DFT\",\n \"BACK/B0000039.DFT\",\n \"BACK/B0000040.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000046.DFT\",\n \"BACK/B0000047.DFT\",\n \"BACK/B0000048.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000019.DFT\",\n \"NAME/N0000020.DFT\",\n \"NAME/N0000021.DFT\",\n \"NAME/N0000022.DFT\",\n \"NAME/N0000027.DFT\",\n \"NAME/N0000028.DFT\",\n \"NAME/N0000029.DFT\",\n \"NAME/N0000030.DFT\",\n \"NAME/N0000031.DFT\",\n \"NAME/N0000036.DFT\",\n \"NAME/N0000037.DFT\",\n \"NAME/N0000038.DFT\",\n \"NAME/N0000039.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000046.DFT\",\n \"NAME/N0000047.DFT\",\n \"NAME/N0000048.DFT\",\n 
\"NAME/N0000049.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000019.DFT\",\n \"POI/P0000020.DFT\",\n \"POI/P0000021.DFT\",\n \"POI/P0000022.DFT\",\n \"POI/P0000027.DFT\",\n \"POI/P0000028.DFT\",\n \"POI/P0000029.DFT\",\n \"POI/P0000030.DFT\",\n \"POI/P0000031.DFT\",\n \"POI/P0000036.DFT\",\n \"POI/P0000037.DFT\",\n \"POI/P0000038.DFT\",\n \"POI/P0000039.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000046.DFT\",\n \"POI/P0000047.DFT\",\n \"POI/P0000048.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000054.DFT\",\n ],\n 5: [\n \"BACK/B0000002.DFT\",\n \"BACK/B0000003.DFT\",\n \"BACK/B0000004.DFT\",\n \"BACK/B0000011.DFT\",\n \"BACK/B0000012.DFT\",\n \"BACK/B0000013.DFT\",\n \"BACK/B0000020.DFT\",\n \"BACK/B0000021.DFT\",\n \"BACK/B0000022.DFT\",\n \"BACK/B0000029.DFT\",\n \"BACK/B0000030.DFT\",\n \"BACK/B0000031.DFT\",\n \"NAME/N0000002.DFT\",\n \"NAME/N0000003.DFT\",\n \"NAME/N0000004.DFT\",\n \"NAME/N0000011.DFT\",\n \"NAME/N0000012.DFT\",\n \"NAME/N0000013.DFT\",\n \"NAME/N0000020.DFT\",\n \"NAME/N0000021.DFT\",\n \"NAME/N0000022.DFT\",\n \"NAME/N0000029.DFT\",\n \"NAME/N0000030.DFT\",\n \"NAME/N0000031.DFT\",\n \"POI/P0000003.DFT\",\n \"POI/P0000011.DFT\",\n \"POI/P0000012.DFT\",\n \"POI/P0000013.DFT\",\n \"POI/P0000020.DFT\",\n \"POI/P0000021.DFT\",\n \"POI/P0000022.DFT\",\n \"POI/P0000029.DFT\",\n \"POI/P0000030.DFT\",\n \"POI/P0000031.DFT\",\n ],\n 6: [\n \"BACK/B0000040.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n ],\n 7: [\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"BACK/B0000052.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"NAME/N0000052.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n \"POI/P0000052.DFT\",\n ],\n 8: [\n \"BACK/B0000031.DFT\",\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000040.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000049.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"NAME/N0000031.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000040.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000049.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"POI/P0000031.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000040.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000049.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n ],\n 9: [\n \"BACK/B0000005.DFT\",\n \"BACK/B0000006.DFT\",\n \"BACK/B0000007.DFT\",\n \"BACK/B0000014.DFT\",\n \"BACK/B0000015.DFT\",\n \"BACK/B0000016.DFT\",\n \"BACK/B0000023.DFT\",\n \"BACK/B0000024.DFT\",\n \"BACK/B0000025.DFT\",\n \"BACK/B0000032.DFT\",\n \"BACK/B0000033.DFT\",\n \"BACK/B0000034.DFT\",\n \"BACK/B0000041.DFT\",\n 
\"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"NAME/N0000005.DFT\",\n \"NAME/N0000006.DFT\",\n \"NAME/N0000007.DFT\",\n \"NAME/N0000014.DFT\",\n \"NAME/N0000015.DFT\",\n \"NAME/N0000016.DFT\",\n \"NAME/N0000023.DFT\",\n \"NAME/N0000024.DFT\",\n \"NAME/N0000025.DFT\",\n \"NAME/N0000032.DFT\",\n \"NAME/N0000033.DFT\",\n \"NAME/N0000034.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"POI/P0000014.DFT\",\n \"POI/P0000015.DFT\",\n \"POI/P0000023.DFT\",\n \"POI/P0000024.DFT\",\n \"POI/P0000025.DFT\",\n \"POI/P0000032.DFT\",\n \"POI/P0000033.DFT\",\n \"POI/P0000034.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n ],\n 10: [\n \"BACK/B0000037.DFT\",\n \"BACK/B0000041.DFT\",\n \"BACK/B0000042.DFT\",\n \"BACK/B0000043.DFT\",\n \"BACK/B0000044.DFT\",\n \"BACK/B0000045.DFT\",\n \"BACK/B0000046.DFT\",\n \"BACK/B0000050.DFT\",\n \"BACK/B0000051.DFT\",\n \"BACK/B0000052.DFT\",\n \"BACK/B0000053.DFT\",\n \"BACK/B0000054.DFT\",\n \"NAME/N0000037.DFT\",\n \"NAME/N0000041.DFT\",\n \"NAME/N0000042.DFT\",\n \"NAME/N0000043.DFT\",\n \"NAME/N0000044.DFT\",\n \"NAME/N0000045.DFT\",\n \"NAME/N0000046.DFT\",\n \"NAME/N0000050.DFT\",\n \"NAME/N0000051.DFT\",\n \"NAME/N0000052.DFT\",\n \"NAME/N0000053.DFT\",\n \"NAME/N0000054.DFT\",\n \"POI/P0000037.DFT\",\n \"POI/P0000041.DFT\",\n \"POI/P0000042.DFT\",\n \"POI/P0000043.DFT\",\n \"POI/P0000044.DFT\",\n \"POI/P0000045.DFT\",\n \"POI/P0000046.DFT\",\n \"POI/P0000050.DFT\",\n \"POI/P0000051.DFT\",\n \"POI/P0000052.DFT\",\n \"POI/P0000053.DFT\",\n \"POI/P0000054.DFT\",\n ],\n },\n }\n nose.tools.assert_equal(result, expected)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""Chatbot learning
학습시 생성된 vocab 딕셔너리 파일을 Cindy ui 실행시 경로를 동일시 해주어야 연결성 있는 문장을 생성해줍니다.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# Tag words
PAD = "<PADDING>"  # padding
STA = "<START>"    # start of sequence
END = "<END>"      # end of sequence
OOV = "<OOV>"      # unknown word (Out of Vocabulary)
# Tag indices
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# Data types
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# Maximum number of words in one sentence sequence
max_sequences = 30
# Embedding vector dimension
embedding_dim = 100
# LSTM hidden-layer dimension
lstm_hidden_dim = 128
# Regular-expression filter for punctuation
RE_FILTER = re.compile("[.,!?\"':;~()]")
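# e.g. re.sub(RE_FILTER, "", "여행 가고 싶다!?") == "여행 가고 싶다"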
# Load the chatbot data
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# Number of samples
len(question)
# Morphological-analysis function
def pos_tag(sentences):
    # Set up the KoNLPy morphological analyzer
    tagger = Okt()
    # Initialize the list of tagged sentences
    sentences_pos = []
    # Iterate over every sentence
    for sentence in sentences:
        # Remove special characters
        sentence = re.sub(RE_FILTER, "", sentence)
        #print(sentence)
        # Join the morpheme list (the analyzer returns an array) with spaces
        sentence = " ".join(tagger.morphs(sentence))
        sentences_pos.append(sentence)
    return sentences_pos
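# Illustrative only: pos_tag(['3박4일 놀러가고 싶다!']) strips the '!' and returns
# something like ['3박 4일 놀러 가고 싶다']; exact splits depend on the Okt model.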
# Run the morphological analysis
question = pos_tag(question)
answer = pos_tag(answer)
# Merge the question and answer sentences into one list
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# Build the array of words
for sentence in sentences:
for word in sentence.split():
words.append(word)
# Drop zero-length words
words = [word for word in words if len(word) > 0]
# Remove duplicate words
words = list(set(words))
# Insert the tag words at the very front
words[:0] = [PAD, STA, END, OOV]
# Number of words
len(words)
# Build the word-to-index and index-to-word dictionaries
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
# Save the word/index vocab dictionaries
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
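# The Cindy UI presumably reloads these via pickle.load from the same relative
# paths (see the module docstring), so keep training and UI paths in sync.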
# Word -> index
# Sentences are converted to indices and used as model input
print(dict(list(word_to_index.items())[:20]))
# Index -> word
# Used to turn the model's predicted indices back into a sentence
print(dict(list(index_to_word.items())[:20]))
# Convert sentences to indices
def convert_text_to_index(sentences, vocabulary, type):
    sentences_index = []
    # Iterate over every sentence
    for sentence in sentences:
        sentence_index = []
        # For decoder input, prepend the START tag
        if type == DECODER_INPUT:
            sentence_index.extend([vocabulary[STA]])
        # Split the sentence into words on whitespace
        for word in sentence.split():
            if vocabulary.get(word) is not None:
                # Word is in the vocabulary: append its index
                sentence_index.extend([vocabulary[word]])
            else:
                # Word is not in the vocabulary: append the OOV index
                sentence_index.extend([vocabulary[OOV]])
        # Enforce the maximum length
        if type == DECODER_TARGET:
            # For decoder targets, append the END tag at the back
            if len(sentence_index) >= max_sequences:
                sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
            else:
                sentence_index += [vocabulary[END]]
        else:
            if len(sentence_index) > max_sequences:
                sentence_index = sentence_index[:max_sequences]
        # Fill the remaining slots up to the maximum length with the padding index
        sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
        # Append this sentence's index array
        sentences_index.append(sentence_index)
    return np.asarray(sentences_index)
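# Illustrative shape check: with max_sequences = 30,
# convert_text_to_index(['여행'], word_to_index, ENCODER_INPUT) yields a (1, 30)
# array [word_to_index['여행'], PAD_INDEX, ...], assuming '여행' is in the vocab.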
# Convert encoder inputs to indices
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# First encoder input (the sample "12시 땡")
x_encoder[0]
# Convert decoder inputs to indices
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# First decoder input (START + the sample "하루 가 또 가네요")
x_decoder[0]
len(x_decoder[0])
# Convert decoder targets to indices
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# First decoder target (the sample "하루 가 또 가네요" + END)
print(y_decoder[0])
# Initialize the one-hot encoding
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# Convert the decoder targets to one-hot encoding
# During training the inputs are indices, but the outputs are one-hot vectors
for i, sequence in enumerate(y_decoder):
    for j, index in enumerate(sequence):
        one_hot_data[i, j, index] = 1
# Set the decoder targets
y_decoder = one_hot_data
# First decoder target
print(y_decoder[0])
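# Note: one_hot_data has shape (num_answers, 30, len(words)) in float64 and can
# dominate memory for large vocabularies; that is why the inputs stay as indices.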
#--------------------------------------------
# Training model: encoder definition
#--------------------------------------------
# Takes the input sentence's index sequence as input
encoder_inputs = layers.Input(shape=(None,))
# Embedding layer
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# With return_state=True the layer also returns its states
# An LSTM has two states: state_h (hidden state) and state_c (cell state)
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
                                                dropout=0.1,
                                                recurrent_dropout=0.5,
                                                return_state=True)(encoder_outputs)
# Bundle the hidden state and cell state together
encoder_states = [state_h, state_c]
#--------------------------------------------
# Training model: decoder definition
#--------------------------------------------
# Takes the target sentence's index sequence as input
decoder_inputs = layers.Input(shape=(None,))
# Embedding layer
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# Unlike the encoder, return_sequences=True returns the output at every time step,
# so that all time-step outputs can be fed into the following Dense() layer
decoder_lstm = layers.LSTM(lstm_hidden_dim,
                           dropout=0.1,
                           recurrent_dropout=0.5,
                           return_state=True,
                           return_sequences=True)
# Initialize initial_state with the encoder states
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
                                     initial_state=encoder_states)
# One node per vocabulary word, emitting each word index in one-hot form
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# Training model definition
#--------------------------------------------
# Build a functional-API model from the inputs and outputs
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Configure the training setup
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
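# Training uses teacher forcing: the decoder is fed the ground-truth previous
# tokens (x_decoder) while learning to predict the next ones (y_decoder).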
#--------------------------------------------
# Inference model: encoder definition
#--------------------------------------------
# Reuse the training encoder's states for the inference encoder
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# Inference model: decoder definition
#--------------------------------------------
# Unlike training, inference runs one time step at a time,
# each step taking the previous decoder state as input
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# Embedding layer
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM layer
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
                                                 initial_state=decoder_states_inputs)
# Bundle the hidden state and cell state together
decoder_states = [state_h, state_c]
# Emit each word index in one-hot form through the Dense layer
decoder_outputs = decoder_dense(decoder_outputs)
# Build the inference decoder model
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
                             [decoder_outputs] + decoder_states)
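# Resulting contract (sketch): decoder_model.predict([seq, h, c]) returns
# [probabilities of shape (1, seq_len, len(words)), new_h, new_c];
# generate_text below steps this one token at a time.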
# Convert indices to a sentence
def convert_index_to_text(indexs, vocabulary):
    sentence = ''
    # Iterate over every index
    for index in indexs:
        if index == END_INDEX:
            # Stop at the end index
            break
        if vocabulary.get(index) is not None:
            # Index is in the dictionary: append its word
            sentence += vocabulary[index]
        else:
            # Index is not in the dictionary: append the OOV word
            sentence += vocabulary[OOV_INDEX]
        # Add a space
        sentence += ' '
    return sentence
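# Illustrative (idx() is a hypothetical shorthand for word_to_index lookups):
# convert_index_to_text([idx('여행'), idx('은'), END_INDEX], index_to_word) -> '여행 은 '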
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
# Repeat over epochs
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
    # Print the accuracy and loss
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
    # Sentence prediction test
    # (sample: "3 박 4일 놀러 가고 싶다" -> "여행 은 언제나 좋죠")
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
    # Convert the one-hot results back to indices
    # Take the position of the highest value along axis 1
indexs = np.argmax(results[0], 1)
    # Convert the indices to a sentence
sentence = convert_index_to_text(indexs, index_to_word)
# Save the model weights
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
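# To reuse these weights elsewhere, rebuild identical architectures and then
# call load_weights on the same paths (Keras requires a matching topology).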
# Build the input for prediction
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
return input_seq
# Text generation
def generate_text(input_seq):
    # Feed the input through the encoder to get its final states
    states = encoder_model.predict(input_seq)
    # Initialize the target sequence
    target_seq = np.zeros((1, 1))
    # Put the <START> tag at the first position of the target sequence
    target_seq[0, 0] = STA_INDEX
    # Initialize the index list
    indexs = []
    # Loop over the decoder time steps
    while 1:
        # Run the decoder for the current time step;
        # initialized from the encoder states first, then from the previous decoder states
        decoder_outputs, state_h, state_c = decoder_model.predict(
            [target_seq] + states)
        # Convert the one-hot result to an index
        index = np.argmax(decoder_outputs[0, 0, :])
        indexs.append(index)
        # Termination check
        if index == END_INDEX or len(indexs) >= max_sequences:
            break
        # Set the target sequence to the output just produced
        target_seq = np.zeros((1, 1))
        target_seq[0, 0] = index
        # Use the previous decoder states for the next decoder prediction
        states = [state_h, state_c]
    # Convert the indices to a sentence
    sentence = convert_index_to_text(indexs, index_to_word)
    return sentence
# Convert a sentence to indices
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# Generate text with the inference model
sentence = generate_text(input_seq)
print(sentence)
# Convert a sentence to indices
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# Generate text with the inference model
sentence = generate_text(input_seq)
print(sentence)
# Convert a sentence to indices
input_seq = make_predict_input('3박4일 놀러가려고')
# Generate text with the inference model
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence)
|
normal
|
{
"blob_id": "bd06030ace665a0686c894a863e5c779b6d0931c",
"index": 6447,
"step-1": "<mask token>\n\n\ndef convert_text_to_index(sentences, vocabulary, type):\n sentences_index = []\n for sentence in sentences:\n sentence_index = []\n if type == DECODER_INPUT:\n sentence_index.extend([vocabulary[STA]])\n for word in sentence.split():\n if vocabulary.get(word) is not None:\n sentence_index.extend([vocabulary[word]])\n else:\n sentence_index.extend([vocabulary[OOV]])\n if type == DECODER_TARGET:\n if len(sentence_index) >= max_sequences:\n sentence_index = sentence_index[:max_sequences - 1] + [\n vocabulary[END]]\n else:\n sentence_index += [vocabulary[END]]\n elif len(sentence_index) > max_sequences:\n sentence_index = sentence_index[:max_sequences]\n sentence_index += (max_sequences - len(sentence_index)) * [vocabulary\n [PAD]]\n sentences_index.append(sentence_index)\n return np.asarray(sentences_index)\n\n\n<mask token>\n\n\ndef make_predict_input(sentence):\n sentences = []\n sentences.append(sentence)\n sentences = pos_tag(sentences)\n input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)\n return input_seq\n\n\ndef generate_text(input_seq):\n states = encoder_model.predict(input_seq)\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = STA_INDEX\n indexs = []\n while 1:\n decoder_outputs, state_h, state_c = decoder_model.predict([\n target_seq] + states)\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n if index == END_INDEX or len(indexs) >= max_sequences:\n break\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n states = [state_h, state_c]\n sentence = convert_index_to_text(indexs, index_to_word)\n return sentence\n\n\n<mask token>\n",
"step-2": "<mask token>\ntf.__version__\n<mask token>\nchatbot_data.head()\nlen(chatbot_data['Q'].unique())\nlen(question)\n\n\ndef pos_tag(sentences):\n tagger = Okt()\n sentences_pos = []\n for sentence in sentences:\n sentence = re.sub(RE_FILTER, '', sentence)\n sentence = ' '.join(tagger.morphs(sentence))\n sentences_pos.append(sentence)\n return sentences_pos\n\n\n<mask token>\nsentences.extend(question)\nsentences.extend(answer)\n<mask token>\nfor sentence in sentences:\n for word in sentence.split():\n words.append(word)\n<mask token>\nlen(words)\n<mask token>\nwith open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:\n pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)\nwith open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:\n pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)\nprint(dict(list(word_to_index.items())[:20]))\nprint(dict(list(index_to_word.items())[:20]))\n\n\ndef convert_text_to_index(sentences, vocabulary, type):\n sentences_index = []\n for sentence in sentences:\n sentence_index = []\n if type == DECODER_INPUT:\n sentence_index.extend([vocabulary[STA]])\n for word in sentence.split():\n if vocabulary.get(word) is not None:\n sentence_index.extend([vocabulary[word]])\n else:\n sentence_index.extend([vocabulary[OOV]])\n if type == DECODER_TARGET:\n if len(sentence_index) >= max_sequences:\n sentence_index = sentence_index[:max_sequences - 1] + [\n vocabulary[END]]\n else:\n sentence_index += [vocabulary[END]]\n elif len(sentence_index) > max_sequences:\n sentence_index = sentence_index[:max_sequences]\n sentence_index += (max_sequences - len(sentence_index)) * [vocabulary\n [PAD]]\n sentences_index.append(sentence_index)\n return np.asarray(sentences_index)\n\n\n<mask token>\nx_encoder[0]\n<mask token>\nx_decoder[0]\nlen(x_decoder[0])\n<mask token>\nprint(y_decoder[0])\n<mask token>\nfor i, sequence in enumerate(y_decoder):\n for j, index in enumerate(sequence):\n one_hot_data[i, j, index] = 1\n<mask token>\nprint(y_decoder[0])\n<mask token>\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics\n =['accuracy'])\n<mask token>\n\n\ndef convert_index_to_text(indexs, vocabulary):\n sentence = ''\n for index in indexs:\n if index == END_INDEX:\n break\n if vocabulary.get(index) is not None:\n sentence += vocabulary[index]\n else:\n sentence.extend([vocabulary[OOV_INDEX]])\n sentence += ' '\n return sentence\n\n\n<mask token>\nfor epoch in range(10):\n print('Total Epoch :', epoch + 1)\n history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100,\n batch_size=64, verbose=1)\n model.summary()\n print('accuracy :', history.history['accuracy'][-1])\n print('loss :', history.history['loss'][-1])\n input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])\n input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])\n results = model.predict([input_encoder, input_decoder])\n indexs = np.argmax(results[0], 1)\n sentence = convert_index_to_text(indexs, index_to_word)\nmodel.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\nencoder_model.save_weights(\n './seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\ndecoder_model.save_weights(\n './seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n\n\ndef make_predict_input(sentence):\n sentences = []\n sentences.append(sentence)\n sentences = pos_tag(sentences)\n input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)\n return input_seq\n\n\ndef generate_text(input_seq):\n states = encoder_model.predict(input_seq)\n 
target_seq = np.zeros((1, 1))\n target_seq[0, 0] = STA_INDEX\n indexs = []\n while 1:\n decoder_outputs, state_h, state_c = decoder_model.predict([\n target_seq] + states)\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n if index == END_INDEX or len(indexs) >= max_sequences:\n break\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n states = [state_h, state_c]\n sentence = convert_index_to_text(indexs, index_to_word)\n return sentence\n\n\n<mask token>\ninput_seq\n<mask token>\nprint(sentence)\n<mask token>\ninput_seq\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n<mask token>\nprint(sentence)\n",
"step-3": "<mask token>\ntf.__version__\nPAD = '<PADDING>'\nSTA = '<START>'\nEND = '<END>'\nOOV = '<OOV>'\nPAD_INDEX = 0\nSTA_INDEX = 1\nEND_INDEX = 2\nOOV_INDEX = 3\nENCODER_INPUT = 0\nDECODER_INPUT = 1\nDECODER_TARGET = 2\nmax_sequences = 30\nembedding_dim = 100\nlstm_hidden_dim = 128\nRE_FILTER = re.compile('[.,!?\"\\':;~()]')\nchatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')\nquestion, answer = list(chatbot_data['Q']), list(chatbot_data['A'])\nchatbot_data.head()\nlen(chatbot_data['Q'].unique())\nlen(question)\n\n\ndef pos_tag(sentences):\n tagger = Okt()\n sentences_pos = []\n for sentence in sentences:\n sentence = re.sub(RE_FILTER, '', sentence)\n sentence = ' '.join(tagger.morphs(sentence))\n sentences_pos.append(sentence)\n return sentences_pos\n\n\nquestion = pos_tag(question)\nanswer = pos_tag(answer)\nsentences = []\nsentences.extend(question)\nsentences.extend(answer)\nwords = []\nfor sentence in sentences:\n for word in sentence.split():\n words.append(word)\nwords = [word for word in words if len(word) > 0]\nwords = list(set(words))\nwords[:0] = [PAD, STA, END, OOV]\nlen(words)\nword_to_index = {word: index for index, word in enumerate(words)}\nindex_to_word = {index: word for index, word in enumerate(words)}\nwith open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:\n pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)\nwith open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:\n pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)\nprint(dict(list(word_to_index.items())[:20]))\nprint(dict(list(index_to_word.items())[:20]))\n\n\ndef convert_text_to_index(sentences, vocabulary, type):\n sentences_index = []\n for sentence in sentences:\n sentence_index = []\n if type == DECODER_INPUT:\n sentence_index.extend([vocabulary[STA]])\n for word in sentence.split():\n if vocabulary.get(word) is not None:\n sentence_index.extend([vocabulary[word]])\n else:\n sentence_index.extend([vocabulary[OOV]])\n if type == DECODER_TARGET:\n if len(sentence_index) >= max_sequences:\n sentence_index = sentence_index[:max_sequences - 1] + [\n vocabulary[END]]\n else:\n sentence_index += [vocabulary[END]]\n elif len(sentence_index) > max_sequences:\n sentence_index = sentence_index[:max_sequences]\n sentence_index += (max_sequences - len(sentence_index)) * [vocabulary\n [PAD]]\n sentences_index.append(sentence_index)\n return np.asarray(sentences_index)\n\n\nx_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)\nx_encoder[0]\nx_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)\nx_decoder[0]\nlen(x_decoder[0])\ny_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)\nprint(y_decoder[0])\none_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))\nfor i, sequence in enumerate(y_decoder):\n for j, index in enumerate(sequence):\n one_hot_data[i, j, index] = 1\ny_decoder = one_hot_data\nprint(y_decoder[0])\nencoder_inputs = layers.Input(shape=(None,))\nencoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)\nencoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim, dropout=\n 0.1, recurrent_dropout=0.5, return_state=True)(encoder_outputs)\nencoder_states = [state_h, state_c]\ndecoder_inputs = layers.Input(shape=(None,))\ndecoder_embedding = layers.Embedding(len(words), embedding_dim)\ndecoder_outputs = decoder_embedding(decoder_inputs)\ndecoder_lstm = layers.LSTM(lstm_hidden_dim, dropout=0.1, recurrent_dropout=\n 0.5, return_state=True, 
return_sequences=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_outputs, initial_state=\n encoder_states)\ndecoder_dense = layers.Dense(len(words), activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\nmodel = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics\n =['accuracy'])\nencoder_model = models.Model(encoder_inputs, encoder_states)\ndecoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs = decoder_embedding(decoder_inputs)\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,\n initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = models.Model([decoder_inputs] + decoder_states_inputs, [\n decoder_outputs] + decoder_states)\n\n\ndef convert_index_to_text(indexs, vocabulary):\n sentence = ''\n for index in indexs:\n if index == END_INDEX:\n break\n if vocabulary.get(index) is not None:\n sentence += vocabulary[index]\n else:\n sentence.extend([vocabulary[OOV_INDEX]])\n sentence += ' '\n return sentence\n\n\n<mask token>\nfor epoch in range(10):\n print('Total Epoch :', epoch + 1)\n history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100,\n batch_size=64, verbose=1)\n model.summary()\n print('accuracy :', history.history['accuracy'][-1])\n print('loss :', history.history['loss'][-1])\n input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])\n input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])\n results = model.predict([input_encoder, input_decoder])\n indexs = np.argmax(results[0], 1)\n sentence = convert_index_to_text(indexs, index_to_word)\nmodel.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\nencoder_model.save_weights(\n './seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\ndecoder_model.save_weights(\n './seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n\n\ndef make_predict_input(sentence):\n sentences = []\n sentences.append(sentence)\n sentences = pos_tag(sentences)\n input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)\n return input_seq\n\n\ndef generate_text(input_seq):\n states = encoder_model.predict(input_seq)\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = STA_INDEX\n indexs = []\n while 1:\n decoder_outputs, state_h, state_c = decoder_model.predict([\n target_seq] + states)\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n if index == END_INDEX or len(indexs) >= max_sequences:\n break\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n states = [state_h, state_c]\n sentence = convert_index_to_text(indexs, index_to_word)\n return sentence\n\n\ninput_seq = make_predict_input('3박4일 놀러가고 싶다')\ninput_seq\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('3박4일 같이 놀러가고 싶다')\ninput_seq\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('3박4일 놀러가려고')\nprint(sentence)\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('SNS 시간낭비인데')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('PPL 너무나 심하네')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가상화폐 망함')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = 
make_predict_input('가스불')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가스비')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가족 보고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('간식 먹고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('간접흡연 싫어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('감기 기운 잇어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('내일 날씨 어떄?')\nsentence = generate_text(input_seq)\nprint(sentence)\n",
"step-4": "<mask token>\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers, losses, metrics\nfrom tensorflow.keras import preprocessing\nimport numpy as np\nimport pandas as pd\nimport os\nimport re\nfrom konlpy.tag import Okt\nimport pickle\nimport tensorflow as tf\ntf.__version__\nPAD = '<PADDING>'\nSTA = '<START>'\nEND = '<END>'\nOOV = '<OOV>'\nPAD_INDEX = 0\nSTA_INDEX = 1\nEND_INDEX = 2\nOOV_INDEX = 3\nENCODER_INPUT = 0\nDECODER_INPUT = 1\nDECODER_TARGET = 2\nmax_sequences = 30\nembedding_dim = 100\nlstm_hidden_dim = 128\nRE_FILTER = re.compile('[.,!?\"\\':;~()]')\nchatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')\nquestion, answer = list(chatbot_data['Q']), list(chatbot_data['A'])\nchatbot_data.head()\nlen(chatbot_data['Q'].unique())\nlen(question)\n\n\ndef pos_tag(sentences):\n tagger = Okt()\n sentences_pos = []\n for sentence in sentences:\n sentence = re.sub(RE_FILTER, '', sentence)\n sentence = ' '.join(tagger.morphs(sentence))\n sentences_pos.append(sentence)\n return sentences_pos\n\n\nquestion = pos_tag(question)\nanswer = pos_tag(answer)\nsentences = []\nsentences.extend(question)\nsentences.extend(answer)\nwords = []\nfor sentence in sentences:\n for word in sentence.split():\n words.append(word)\nwords = [word for word in words if len(word) > 0]\nwords = list(set(words))\nwords[:0] = [PAD, STA, END, OOV]\nlen(words)\nword_to_index = {word: index for index, word in enumerate(words)}\nindex_to_word = {index: word for index, word in enumerate(words)}\nwith open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:\n pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)\nwith open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:\n pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)\nprint(dict(list(word_to_index.items())[:20]))\nprint(dict(list(index_to_word.items())[:20]))\n\n\ndef convert_text_to_index(sentences, vocabulary, type):\n sentences_index = []\n for sentence in sentences:\n sentence_index = []\n if type == DECODER_INPUT:\n sentence_index.extend([vocabulary[STA]])\n for word in sentence.split():\n if vocabulary.get(word) is not None:\n sentence_index.extend([vocabulary[word]])\n else:\n sentence_index.extend([vocabulary[OOV]])\n if type == DECODER_TARGET:\n if len(sentence_index) >= max_sequences:\n sentence_index = sentence_index[:max_sequences - 1] + [\n vocabulary[END]]\n else:\n sentence_index += [vocabulary[END]]\n elif len(sentence_index) > max_sequences:\n sentence_index = sentence_index[:max_sequences]\n sentence_index += (max_sequences - len(sentence_index)) * [vocabulary\n [PAD]]\n sentences_index.append(sentence_index)\n return np.asarray(sentences_index)\n\n\nx_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)\nx_encoder[0]\nx_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)\nx_decoder[0]\nlen(x_decoder[0])\ny_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)\nprint(y_decoder[0])\none_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))\nfor i, sequence in enumerate(y_decoder):\n for j, index in enumerate(sequence):\n one_hot_data[i, j, index] = 1\ny_decoder = one_hot_data\nprint(y_decoder[0])\nencoder_inputs = layers.Input(shape=(None,))\nencoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)\nencoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim, dropout=\n 0.1, recurrent_dropout=0.5, 
return_state=True)(encoder_outputs)\nencoder_states = [state_h, state_c]\ndecoder_inputs = layers.Input(shape=(None,))\ndecoder_embedding = layers.Embedding(len(words), embedding_dim)\ndecoder_outputs = decoder_embedding(decoder_inputs)\ndecoder_lstm = layers.LSTM(lstm_hidden_dim, dropout=0.1, recurrent_dropout=\n 0.5, return_state=True, return_sequences=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_outputs, initial_state=\n encoder_states)\ndecoder_dense = layers.Dense(len(words), activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\nmodel = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics\n =['accuracy'])\nencoder_model = models.Model(encoder_inputs, encoder_states)\ndecoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs = decoder_embedding(decoder_inputs)\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,\n initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = models.Model([decoder_inputs] + decoder_states_inputs, [\n decoder_outputs] + decoder_states)\n\n\ndef convert_index_to_text(indexs, vocabulary):\n sentence = ''\n for index in indexs:\n if index == END_INDEX:\n break\n if vocabulary.get(index) is not None:\n sentence += vocabulary[index]\n else:\n sentence.extend([vocabulary[OOV_INDEX]])\n sentence += ' '\n return sentence\n\n\nfrom tqdm import tqdm\nfor epoch in range(10):\n print('Total Epoch :', epoch + 1)\n history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100,\n batch_size=64, verbose=1)\n model.summary()\n print('accuracy :', history.history['accuracy'][-1])\n print('loss :', history.history['loss'][-1])\n input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])\n input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])\n results = model.predict([input_encoder, input_decoder])\n indexs = np.argmax(results[0], 1)\n sentence = convert_index_to_text(indexs, index_to_word)\nmodel.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\nencoder_model.save_weights(\n './seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\ndecoder_model.save_weights(\n './seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n\n\ndef make_predict_input(sentence):\n sentences = []\n sentences.append(sentence)\n sentences = pos_tag(sentences)\n input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)\n return input_seq\n\n\ndef generate_text(input_seq):\n states = encoder_model.predict(input_seq)\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = STA_INDEX\n indexs = []\n while 1:\n decoder_outputs, state_h, state_c = decoder_model.predict([\n target_seq] + states)\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n if index == END_INDEX or len(indexs) >= max_sequences:\n break\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n states = [state_h, state_c]\n sentence = convert_index_to_text(indexs, index_to_word)\n return sentence\n\n\ninput_seq = make_predict_input('3박4일 놀러가고 싶다')\ninput_seq\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('3박4일 같이 놀러가고 싶다')\ninput_seq\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('3박4일 놀러가려고')\nprint(sentence)\nsentence = 
generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('SNS 시간낭비인데')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('PPL 너무나 심하네')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가상화폐 망함')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가스불')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가스비')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('가족 보고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('간식 먹고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('간접흡연 싫어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('감기 기운 잇어')\nsentence = generate_text(input_seq)\nprint(sentence)\ninput_seq = make_predict_input('내일 날씨 어떄?')\nsentence = generate_text(input_seq)\nprint(sentence)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Chatbot learning\n학습시 생성된 vocab 딕셔너리 파일을 Cindy ui 실행시 경로를 동일시 해주어야 연결성 있는 문장을 생성해줍니다.\n\"\"\"\n\n\n\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers, losses, metrics\nfrom tensorflow.keras import preprocessing\n\nimport numpy as np\nimport pandas as pd\n#import matplotlib.pyplot as plt\nimport os\nimport re\n\nfrom konlpy.tag import Okt\nimport pickle\nimport tensorflow as tf\ntf.__version__\n\n# 태그 단어\nPAD = \"<PADDING>\" # 패딩\nSTA = \"<START>\" # 시작\nEND = \"<END>\" # 끝\nOOV = \"<OOV>\" # 없는 단어(Out of Vocabulary)\n\n# 태그 인덱스\nPAD_INDEX = 0\nSTA_INDEX = 1\nEND_INDEX = 2\nOOV_INDEX = 3\n\n# 데이터 타입\nENCODER_INPUT = 0\nDECODER_INPUT = 1\nDECODER_TARGET = 2\n\n# 한 문장에서 단어 시퀀스의 최대 개수\nmax_sequences = 30\n\n# 임베딩 벡터 차원\nembedding_dim = 100\n\n# LSTM 히든레이어 차원\nlstm_hidden_dim = 128\n\n# 정규 표현식 필터\nRE_FILTER = re.compile(\"[.,!?\\\"':;~()]\")\n\n# 챗봇 데이터 로드\nchatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')\nquestion, answer = list(chatbot_data['Q']), list(chatbot_data['A'])\n\nchatbot_data.head()\n\nlen(chatbot_data['Q'].unique())\n\n# 데이터 개수\nlen(question)\n\n\n# 형태소분석 함수\ndef pos_tag(sentences):\n \n # KoNLPy 형태소분석기 설정\n tagger = Okt()\n \n # 문장 품사 변수 초기화\n sentences_pos = []\n \n # 모든 문장 반복\n for sentence in sentences:\n # 특수기호 제거\n sentence = re.sub(RE_FILTER, \"\", sentence)\n #print(sentence)\n # 배열인 형태소분석의 출력을 띄어쓰기로 구분하여 붙임\n sentence = \" \".join(tagger.morphs(sentence))\n sentences_pos.append(sentence)\n \n return sentences_pos\n\n# 형태소분석 수행\nquestion = pos_tag(question)\nanswer = pos_tag(answer)\n\n\n# 질문과 대답 문장들을 하나로 합침\nsentences = []\nsentences.extend(question)\nsentences.extend(answer)\n\nwords = []\n\n# 단어들의 배열 생성\nfor sentence in sentences:\n for word in sentence.split():\n words.append(word)\n\n# 길이가 0인 단어는 삭제\nwords = [word for word in words if len(word) > 0]\n# 중복된 단어 삭제\nwords = list(set(words))\n# 제일 앞에 태그 단어 삽입\nwords[:0] = [PAD, STA, END, OOV]\n# 단어 개수\nlen(words)\n\n# 단어와 인덱스의 딕셔너리 생성\n\n\nword_to_index = {word: index for index, word in enumerate(words)}\nindex_to_word = {index: word for index, word in enumerate(words)}\n\n\n#word_index vocab 저장 - > \nwith open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:\n pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)\n\nwith open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:\n pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)\n\n\n# 단어 -> 인덱스\n# 문장을 인덱스로 변환하여 모델 입력으로 사용\nprint(dict(list(word_to_index.items())[:20]))\n\n# 인덱스 -> 단어\n# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용\nprint(dict(list(index_to_word.items())[:20]))\n\n# 문장을 인덱스로 변환\ndef convert_text_to_index(sentences, vocabulary, type): \n \n sentences_index = []\n \n # 모든 문장에 대해서 반복\n for sentence in sentences:\n sentence_index = []\n \n # 디코더 입력일 경우 맨 앞에 START 태그 추가\n if type == DECODER_INPUT:\n sentence_index.extend([vocabulary[STA]])\n \n # 문장의 단어들을 띄어쓰기로 분리\n for word in sentence.split():\n if vocabulary.get(word) is not None:\n # 사전에 있는 단어면 해당 인덱스를 추가\n sentence_index.extend([vocabulary[word]])\n else:\n # 사전에 없는 단어면 OOV 인덱스를 추가\n sentence_index.extend([vocabulary[OOV]])\n\n # 최대 길이 검사\n if type == DECODER_TARGET:\n # 디코더 목표일 경우 맨 뒤에 END 태그 추가\n if len(sentence_index) >= max_sequences:\n sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]\n else:\n sentence_index += [vocabulary[END]]\n else:\n if len(sentence_index) > max_sequences:\n sentence_index = sentence_index[:max_sequences]\n \n 
# 최대 길이에 없는 공간은 패딩 인덱스로 채움\n sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]\n \n # 문장의 인덱스 배열을 추가\n sentences_index.append(sentence_index)\n\n return np.asarray(sentences_index)\n\n# 인코더 입력 인덱스 변환\nx_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)\n\n# 첫 번째 인코더 입력 출력 (12시 땡)\nx_encoder[0]\n\n# 디코더 입력 인덱스 변환\nx_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)\n\n# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)\nx_decoder[0]\n\nlen(x_decoder[0])\n\n# 디코더 목표 인덱스 변환\ny_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)\n\n# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)\nprint(y_decoder[0])\n\n# 원핫인코딩 초기화\none_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))\n\n# 디코더 목표를 원핫인코딩으로 변환\n# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임\nfor i, sequence in enumerate(y_decoder):\n for j, index in enumerate(sequence):\n one_hot_data[i, j, index] = 1\n\n# 디코더 목표 설정\ny_decoder = one_hot_data\n\n# 첫 번째 디코더 목표 출력\nprint(y_decoder[0])\n\n#--------------------------------------------\n# 훈련 모델 인코더 정의\n#--------------------------------------------\n\n# 입력 문장의 인덱스 시퀀스를 입력으로 받음\nencoder_inputs = layers.Input(shape=(None,))\n\n# 임베딩 레이어\nencoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)\n\n# return_state가 True면 상태값 리턴\n# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재\nencoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True)(encoder_outputs)\n\n# 히든 상태와 셀 상태를 하나로 묶음\nencoder_states = [state_h, state_c]\n\n\n\n#--------------------------------------------\n# 훈련 모델 디코더 정의\n#--------------------------------------------\n\n# 목표 문장의 인덱스 시퀀스를 입력으로 받음\ndecoder_inputs = layers.Input(shape=(None,))\n\n# 임베딩 레이어\ndecoder_embedding = layers.Embedding(len(words), embedding_dim)\ndecoder_outputs = decoder_embedding(decoder_inputs)\n\n# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴\n# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함\ndecoder_lstm = layers.LSTM(lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True,\n return_sequences=True)\n\n# initial_state를 인코더의 상태로 초기화\ndecoder_outputs, _, _ = decoder_lstm(decoder_outputs,\n initial_state=encoder_states)\n\n# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력\ndecoder_dense = layers.Dense(len(words), activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n\n\n#--------------------------------------------\n# 훈련 모델 정의\n#--------------------------------------------\n\n# 입력과 출력으로 함수형 API 모델 생성\nmodel = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n# 학습 방법 설정\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n#--------------------------------------------\n# 예측 모델 인코더 정의\n#--------------------------------------------\n\n# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정\nencoder_model = models.Model(encoder_inputs, encoder_states)\n\n\n\n#--------------------------------------------\n# 예측 모델 디코더 정의\n#--------------------------------------------\n\n# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행\n# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정\ndecoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] \n\n# 임베딩 레이어\ndecoder_outputs = decoder_embedding(decoder_inputs)\n\n# LSTM 레이어\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,\n initial_state=decoder_states_inputs)\n\n# 히든 상태와 셀 상태를 하나로 
묶음\ndecoder_states = [state_h, state_c]\n\n# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# 예측 모델 디코더 설정\ndecoder_model = models.Model([decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n\n# 인덱스를 문장으로 변환\ndef convert_index_to_text(indexs, vocabulary): \n \n sentence = ''\n \n # 모든 문장에 대해서 반복\n for index in indexs:\n if index == END_INDEX:\n # 종료 인덱스면 중지\n break;\n if vocabulary.get(index) is not None:\n # 사전에 있는 인덱스면 해당 단어를 추가\n sentence += vocabulary[index]\n else:\n # 사전에 없는 인덱스면 OOV 단어를 추가\n sentence.extend([vocabulary[OOV_INDEX]])\n \n # 빈칸 추가\n sentence += ' '\n\n return sentence\n\n# len(x_decoder)\n#\n# len(y_decoder)\n\n#model.summary()\n\n#encoder_model.summary()\n\n#decoder_model.summary()\n\nfrom tqdm import tqdm\n#에폭 반복\nfor epoch in range(10):\n print('Total Epoch :', epoch + 1)\n\n history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)\n model.summary()\n\n # 정확도와 손실 출력\n print('accuracy :', history.history['accuracy'][-1])\n print('loss :', history.history['loss'][-1])\n\n # 문장 예측 테스트\n # (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)\n input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])\n input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])\n results = model.predict([input_encoder, input_decoder])\n\n # 결과의 원핫인코딩 형식을 인덱스로 변환\n # 1축을 기준으로 가장 높은 값의 위치를 구함\n indexs = np.argmax(results[0], 1)\n\n # 인덱스를 문장으로 변환\n sentence = convert_index_to_text(indexs, index_to_word)\n\n\n\n#모델 가중치 저장\nmodel.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\nencoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\ndecoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n\n\n# 예측을 위한 입력 생성\ndef make_predict_input(sentence):\n\n sentences = []\n sentences.append(sentence)\n sentences = pos_tag(sentences)\n input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)\n \n return input_seq\n\n# 텍스트 생성\ndef generate_text(input_seq):\n \n # 입력을 인코더에 넣어 마지막 상태 구함\n states = encoder_model.predict(input_seq)\n\n # 목표 시퀀스 초기화\n target_seq = np.zeros((1, 1))\n \n # 목표 시퀀스의 첫 번째에 <START> 태그 추가\n target_seq[0, 0] = STA_INDEX\n \n # 인덱스 초기화\n indexs = []\n \n # 디코더 타임 스텝 반복\n while 1:\n # 디코더로 현재 타임 스텝 출력 구함\n # 처음에는 인코더 상태를, 다음부터 이전 디코더 상태로 초기화\n decoder_outputs, state_h, state_c = decoder_model.predict(\n [target_seq] + states)\n\n\n # 결과의 원핫인코딩 형식을 인덱스로 변환\n index = np.argmax(decoder_outputs[0, 0, :])\n indexs.append(index)\n \n # 종료 검사\n if index == END_INDEX or len(indexs) >= max_sequences:\n break\n\n # 목표 시퀀스를 바로 이전의 출력으로 설정\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = index\n \n # 디코더의 이전 상태를 다음 디코더 예측에 사용\n states = [state_h, state_c]\n\n # 인덱스를 문장으로 변환\n sentence = convert_index_to_text(indexs, index_to_word)\n \n return sentence\n\n# 문장을 인덱스로 변환\ninput_seq = make_predict_input('3박4일 놀러가고 싶다')\ninput_seq\n\n# 예측 모델로 텍스트 생성\nsentence = generate_text(input_seq)\nprint(sentence)\n\n# 문장을 인덱스로 변환\ninput_seq = make_predict_input('3박4일 같이 놀러가고 싶다')\ninput_seq\n\n# 예측 모델로 텍스트 생성\nsentence = generate_text(input_seq)\nprint(sentence)\n\n# 문장을 인덱스로 변환\ninput_seq = make_predict_input('3박4일 놀러가려고')\nprint(sentence)\n\n\n# 예측 모델로 텍스트 생성\nsentence = generate_text(input_seq)\nprint(sentence)\n\n\ninput_seq = make_predict_input('SNS 시간낭비인데')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('PPL 너무나 심하네')\nsentence = 
generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가상화폐 망함')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가스불')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가스비')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('가족 보고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('간식 먹고 싶어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('간접흡연 싫어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('감기 기운 잇어')\nsentence = generate_text(input_seq)\nprint(sentence)\n\ninput_seq = make_predict_input('내일 날씨 어떄?')\nsentence = generate_text(input_seq)\nprint(sentence)\n\n\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
class mySeq:
def __init__(self):
self.mseq = ['I', 'II', 'III', 'IV']
def __len__(self):
return len(self.mseq)
def __getitem__(self, key):
if 0 <= key < 4:
return self.mseq[key]
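    # Note: keys outside [0, 4) fall through __getitem__ and return None;
    # raising IndexError instead would let iteration over a mySeq stop cleanly.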
if __name__ == '__main__':
m = mySeq()
print('Len of mySeq : ', len(m))
for i in range(len(m.mseq)):
print(m.mseq[i])
|
normal
|
{
"blob_id": "b86dedad42d092ae97eb21227034e306ca640912",
"index": 5890,
"step-1": "class mySeq:\n <mask token>\n\n def __len__(self):\n return len(self.mseq)\n <mask token>\n\n\n<mask token>\n",
"step-2": "class mySeq:\n\n def __init__(self):\n self.mseq = ['I', 'II', 'III', 'IV']\n\n def __len__(self):\n return len(self.mseq)\n <mask token>\n\n\n<mask token>\n",
"step-3": "class mySeq:\n\n def __init__(self):\n self.mseq = ['I', 'II', 'III', 'IV']\n\n def __len__(self):\n return len(self.mseq)\n\n def __getitem__(self, key):\n if 0 <= key < 4:\n return self.mseq[key]\n\n\n<mask token>\n",
"step-4": "class mySeq:\n\n def __init__(self):\n self.mseq = ['I', 'II', 'III', 'IV']\n\n def __len__(self):\n return len(self.mseq)\n\n def __getitem__(self, key):\n if 0 <= key < 4:\n return self.mseq[key]\n\n\nif __name__ == '__main__':\n m = mySeq()\n print('Len of mySeq : ', len(m))\n for i in range(len(m.mseq)):\n print(m.mseq[i])\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_date(date):
date = date.split('/')
time = str(date[0])
day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),
int(date[1]))]
day_num = str(int(date[1]))
month = calendar.month_name[int(date[2])]
year = str(date[3])
if int(day_num) == 1:
day_num = '1st '
elif int(day_num) == 2:
day_num = '2nd '
elif int(day_num) == 3:
day_num = '3rd '
else:
return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +
month + ' ' + year)
return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)
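# Illustrative: get_date('14:05/3/7/2020') -> '14:05 Friday, the 3rd July 2020'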
<|reserved_special_token_0|>
@app.route('/')
def index():
post = db.get_last_post()
post[3] = get_date(post[3])
return render_template('index.html', last_post=post)
@app.route('/edit')
def add():
table = db.get_all_posts()
return render_template('edit.html', action='write', table=table)
@app.route('/edit', methods=['POST'])
def edit():
if request.method == 'POST':
title = request.form['title']
under_title = request.form['under_title']
author = request.form['author']
release = time.strftime('%H:%M/%-d/%m/%Y')
content = request.form['content']
if db.add_post(title, under_title, author, release, content):
print('Failed to add post to database!', file=sys.stderr)
return render_template('add.html', post=1)
else:
print('Successfully added post to database!', file=sys.stderr)
return render_template('add.html', post=0)
@app.route('/edit/d')
def d():
pass
@app.route('/posts')
def posts():
posts = db.get_all_posts()
for post in posts:
post[3] = get_date(post[3])
return render_template('posts.html', posts=posts)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_date(date):
date = date.split('/')
time = str(date[0])
day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),
int(date[1]))]
day_num = str(int(date[1]))
month = calendar.month_name[int(date[2])]
year = str(date[3])
if int(day_num) == 1:
day_num = '1st '
elif int(day_num) == 2:
day_num = '2nd '
elif int(day_num) == 3:
day_num = '3rd '
else:
return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +
month + ' ' + year)
return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)
<|reserved_special_token_0|>
@app.route('/')
def index():
post = db.get_last_post()
post[3] = get_date(post[3])
return render_template('index.html', last_post=post)
@app.route('/edit')
def add():
table = db.get_all_posts()
return render_template('edit.html', action='write', table=table)
@app.route('/edit', methods=['POST'])
def edit():
if request.method == 'POST':
title = request.form['title']
under_title = request.form['under_title']
author = request.form['author']
release = time.strftime('%H:%M/%-d/%m/%Y')
content = request.form['content']
if db.add_post(title, under_title, author, release, content):
print('Failed to add post to database!', file=sys.stderr)
return render_template('add.html', post=1)
else:
print('Successfully added post to database!', file=sys.stderr)
return render_template('add.html', post=0)
@app.route('/edit/d')
def d():
pass
@app.route('/posts')
def posts():
posts = db.get_all_posts()
for post in posts:
post[3] = get_date(post[3])
return render_template('posts.html', posts=posts)
@app.route('/about')
def about():
return render_template('about.html')
if __name__ == '__main__':
db.check()
app.run(host='0.0.0.0', port=8000, debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_date(date):
date = date.split('/')
time = str(date[0])
day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),
int(date[1]))]
day_num = str(int(date[1]))
month = calendar.month_name[int(date[2])]
year = str(date[3])
if int(day_num) == 1:
day_num = '1st '
elif int(day_num) == 2:
day_num = '2nd '
elif int(day_num) == 3:
day_num = '3rd '
else:
return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +
month + ' ' + year)
return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)
app = Flask(__name__)
@app.route('/')
def index():
post = db.get_last_post()
post[3] = get_date(post[3])
return render_template('index.html', last_post=post)
@app.route('/edit')
def add():
table = db.get_all_posts()
return render_template('edit.html', action='write', table=table)
@app.route('/edit', methods=['POST'])
def edit():
if request.method == 'POST':
title = request.form['title']
under_title = request.form['under_title']
author = request.form['author']
release = time.strftime('%H:%M/%-d/%m/%Y')
content = request.form['content']
if db.add_post(title, under_title, author, release, content):
print('Failed to add post to database!', file=sys.stderr)
return render_template('add.html', post=1)
else:
print('Successfully added post to database!', file=sys.stderr)
return render_template('add.html', post=0)
@app.route('/edit/d')
def d():
pass
@app.route('/posts')
def posts():
posts = db.get_all_posts()
for post in posts:
post[3] = get_date(post[3])
return render_template('posts.html', posts=posts)
@app.route('/about')
def about():
return render_template('about.html')
if __name__ == '__main__':
db.check()
app.run(host='0.0.0.0', port=8000, debug=True)
<|reserved_special_token_1|>
from flask import Flask, render_template, request, redirect
import model as db
import sys
import time, calendar
import jsonify
def get_date(date):
date = date.split('/')
time = str(date[0])
day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),
int(date[1]))]
day_num = str(int(date[1]))
month = calendar.month_name[int(date[2])]
year = str(date[3])
if int(day_num) == 1:
day_num = '1st '
elif int(day_num) == 2:
day_num = '2nd '
elif int(day_num) == 3:
day_num = '3rd '
else:
return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +
month + ' ' + year)
return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)
app = Flask(__name__)
@app.route('/')
def index():
post = db.get_last_post()
post[3] = get_date(post[3])
return render_template('index.html', last_post=post)
@app.route('/edit')
def add():
table = db.get_all_posts()
return render_template('edit.html', action='write', table=table)
@app.route('/edit', methods=['POST'])
def edit():
if request.method == 'POST':
title = request.form['title']
under_title = request.form['under_title']
author = request.form['author']
release = time.strftime('%H:%M/%-d/%m/%Y')
content = request.form['content']
if db.add_post(title, under_title, author, release, content):
print('Failed to add post to database!', file=sys.stderr)
return render_template('add.html', post=1)
else:
print('Successfully added post to database!', file=sys.stderr)
return render_template('add.html', post=0)
@app.route('/edit/d')
def d():
pass
@app.route('/posts')
def posts():
posts = db.get_all_posts()
for post in posts:
post[3] = get_date(post[3])
return render_template('posts.html', posts=posts)
@app.route('/about')
def about():
return render_template('about.html')
if __name__ == '__main__':
db.check()
app.run(host='0.0.0.0', port=8000, debug=True)
<|reserved_special_token_1|>
from flask import Flask, render_template, request, redirect, jsonify
#from gevent.pywsgi import WSGIServer
import model as db
import sys
import time, calendar
def get_date(date):
    date = date.split("/")
    time = str(date[0])
    day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]), int(date[1]))] # .day_abbr[]
    day_num = int(date[1])
    month = calendar.month_name[int(date[2])]
    year = str(date[3])
    # ordinal suffix: 1st/21st/31st, 2nd/22nd, 3rd/23rd, everything else "th"
    # (the old if/elif chain produced "21th", "22th", "23th" and "31th")
    if day_num % 10 == 1 and day_num != 11:
        suffix = "st"
    elif day_num % 10 == 2 and day_num != 12:
        suffix = "nd"
    elif day_num % 10 == 3 and day_num != 13:
        suffix = "rd"
    else:
        suffix = "th"

    return str(time + " " + day_str + ", the " + str(day_num) + suffix + " " + month + " " + year)
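# Sanity check (illustrative, not from the original app): with the suffix
# logic above, get_date("14:05/3/7/2021") -> "14:05 Saturday, the 3rd July 2021"
# and get_date("09:00/21/6/2021") -> "09:00 Monday, the 21st June 2021".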
app = Flask(__name__)
@app.route("/")
def index():
post = db.get_last_post()
post[3] = get_date(post[3])
return render_template("index.html", last_post=post)
@app.route("/edit")
def add():
table = db.get_all_posts()
return render_template("edit.html", action="write", table=table)
@app.route("/edit", methods=["POST"])
def edit():
if request.method == "POST":
title = request.form["title"]
under_title = request.form["under_title"]
author = request.form["author"]
release = time.strftime("%H:%M/%-d/%m/%Y") # HH:MM/dd/mm/yyyy format
content = request.form["content"]
if db.add_post(title, under_title, author, release, content):
print("Failed to add post to database!", file=sys.stderr)
return render_template("add.html", post=1)
else: # successfull
print("Successfully added post to database!", file=sys.stderr)
return render_template("add.html", post=0)
@app.route("/edit/d")
def d():
pass
@app.route("/posts")
def posts():
posts = db.get_all_posts()
for post in posts:
post[3] = get_date(post[3])
return render_template("posts.html", posts=posts)
@app.route("/about")
def about():
return render_template("about.html")
# @app.route("/register", methods=["POST"])
# def get_registration_data():
# if request.method == "POST": # only if website sends sth
# email = request.form["email"] # get userinput via HTML-form
# username = request.form["username"]
# if register_user(username, email): # if sth is wrong with the db
# print("Failed to register!", file=sys.stderr)
# return render_template('register.html',
# action="register",
# status="Failed to register! Please try again!",
# status_color="#ff0033")
# else: # db check successfull
# print("Successfully registered!", file=sys.stderr)
# return render_template('register.html',
# action="finish",
# status="You have been successfully registered!",
# status_color="#08da94",
# username=username)
if __name__ == "__main__":
db.check()
# development/debugging (flask default):
app.run(host="0.0.0.0", port=8000, debug=True)
# basic server, ready for real-life usage [http://localhost:8000/]
#server = WSGIServer(('0.0.0.0', 8000), app)
#server.serve_forever()
|
flexible
|
{
"blob_id": "79390f3ae5dc4cc9105a672d4838a8b1ba53a248",
"index": 3959,
"step-1": "<mask token>\n\n\ndef get_date(date):\n date = date.split('/')\n time = str(date[0])\n day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),\n int(date[1]))]\n day_num = str(int(date[1]))\n month = calendar.month_name[int(date[2])]\n year = str(date[3])\n if int(day_num) == 1:\n day_num = '1st '\n elif int(day_num) == 2:\n day_num = '2nd '\n elif int(day_num) == 3:\n day_num = '3rd '\n else:\n return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +\n month + ' ' + year)\n return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)\n\n\n<mask token>\n\n\[email protected]('/')\ndef index():\n post = db.get_last_post()\n post[3] = get_date(post[3])\n return render_template('index.html', last_post=post)\n\n\[email protected]('/edit')\ndef add():\n table = db.get_all_posts()\n return render_template('edit.html', action='write', table=table)\n\n\[email protected]('/edit', methods=['POST'])\ndef edit():\n if request.method == 'POST':\n title = request.form['title']\n under_title = request.form['under_title']\n author = request.form['author']\n release = time.strftime('%H:%M/%-d/%m/%Y')\n content = request.form['content']\n if db.add_post(title, under_title, author, release, content):\n print('Failed to add post to database!', file=sys.stderr)\n return render_template('add.html', post=1)\n else:\n print('Successfully added post to database!', file=sys.stderr)\n return render_template('add.html', post=0)\n\n\[email protected]('/edit/d')\ndef d():\n pass\n\n\[email protected]('/posts')\ndef posts():\n posts = db.get_all_posts()\n for post in posts:\n post[3] = get_date(post[3])\n return render_template('posts.html', posts=posts)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_date(date):\n date = date.split('/')\n time = str(date[0])\n day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),\n int(date[1]))]\n day_num = str(int(date[1]))\n month = calendar.month_name[int(date[2])]\n year = str(date[3])\n if int(day_num) == 1:\n day_num = '1st '\n elif int(day_num) == 2:\n day_num = '2nd '\n elif int(day_num) == 3:\n day_num = '3rd '\n else:\n return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +\n month + ' ' + year)\n return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)\n\n\n<mask token>\n\n\[email protected]('/')\ndef index():\n post = db.get_last_post()\n post[3] = get_date(post[3])\n return render_template('index.html', last_post=post)\n\n\[email protected]('/edit')\ndef add():\n table = db.get_all_posts()\n return render_template('edit.html', action='write', table=table)\n\n\[email protected]('/edit', methods=['POST'])\ndef edit():\n if request.method == 'POST':\n title = request.form['title']\n under_title = request.form['under_title']\n author = request.form['author']\n release = time.strftime('%H:%M/%-d/%m/%Y')\n content = request.form['content']\n if db.add_post(title, under_title, author, release, content):\n print('Failed to add post to database!', file=sys.stderr)\n return render_template('add.html', post=1)\n else:\n print('Successfully added post to database!', file=sys.stderr)\n return render_template('add.html', post=0)\n\n\[email protected]('/edit/d')\ndef d():\n pass\n\n\[email protected]('/posts')\ndef posts():\n posts = db.get_all_posts()\n for post in posts:\n post[3] = get_date(post[3])\n return render_template('posts.html', posts=posts)\n\n\[email protected]('/about')\ndef about():\n return render_template('about.html')\n\n\nif __name__ == '__main__':\n db.check()\n app.run(host='0.0.0.0', port=8000, debug=True)\n",
"step-3": "<mask token>\n\n\ndef get_date(date):\n date = date.split('/')\n time = str(date[0])\n day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),\n int(date[1]))]\n day_num = str(int(date[1]))\n month = calendar.month_name[int(date[2])]\n year = str(date[3])\n if int(day_num) == 1:\n day_num = '1st '\n elif int(day_num) == 2:\n day_num = '2nd '\n elif int(day_num) == 3:\n day_num = '3rd '\n else:\n return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +\n month + ' ' + year)\n return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n post = db.get_last_post()\n post[3] = get_date(post[3])\n return render_template('index.html', last_post=post)\n\n\[email protected]('/edit')\ndef add():\n table = db.get_all_posts()\n return render_template('edit.html', action='write', table=table)\n\n\[email protected]('/edit', methods=['POST'])\ndef edit():\n if request.method == 'POST':\n title = request.form['title']\n under_title = request.form['under_title']\n author = request.form['author']\n release = time.strftime('%H:%M/%-d/%m/%Y')\n content = request.form['content']\n if db.add_post(title, under_title, author, release, content):\n print('Failed to add post to database!', file=sys.stderr)\n return render_template('add.html', post=1)\n else:\n print('Successfully added post to database!', file=sys.stderr)\n return render_template('add.html', post=0)\n\n\[email protected]('/edit/d')\ndef d():\n pass\n\n\[email protected]('/posts')\ndef posts():\n posts = db.get_all_posts()\n for post in posts:\n post[3] = get_date(post[3])\n return render_template('posts.html', posts=posts)\n\n\[email protected]('/about')\ndef about():\n return render_template('about.html')\n\n\nif __name__ == '__main__':\n db.check()\n app.run(host='0.0.0.0', port=8000, debug=True)\n",
"step-4": "from flask import Flask, render_template, request, redirect\nimport model as db\nimport sys\nimport time, calendar\nimport jsonify\n\n\ndef get_date(date):\n date = date.split('/')\n time = str(date[0])\n day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]),\n int(date[1]))]\n day_num = str(int(date[1]))\n month = calendar.month_name[int(date[2])]\n year = str(date[3])\n if int(day_num) == 1:\n day_num = '1st '\n elif int(day_num) == 2:\n day_num = '2nd '\n elif int(day_num) == 3:\n day_num = '3rd '\n else:\n return str(time + ' ' + day_str + ', the ' + day_num + 'th ' +\n month + ' ' + year)\n return str(time + ' ' + day_str + ', the ' + day_num + month + ' ' + year)\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n post = db.get_last_post()\n post[3] = get_date(post[3])\n return render_template('index.html', last_post=post)\n\n\[email protected]('/edit')\ndef add():\n table = db.get_all_posts()\n return render_template('edit.html', action='write', table=table)\n\n\[email protected]('/edit', methods=['POST'])\ndef edit():\n if request.method == 'POST':\n title = request.form['title']\n under_title = request.form['under_title']\n author = request.form['author']\n release = time.strftime('%H:%M/%-d/%m/%Y')\n content = request.form['content']\n if db.add_post(title, under_title, author, release, content):\n print('Failed to add post to database!', file=sys.stderr)\n return render_template('add.html', post=1)\n else:\n print('Successfully added post to database!', file=sys.stderr)\n return render_template('add.html', post=0)\n\n\[email protected]('/edit/d')\ndef d():\n pass\n\n\[email protected]('/posts')\ndef posts():\n posts = db.get_all_posts()\n for post in posts:\n post[3] = get_date(post[3])\n return render_template('posts.html', posts=posts)\n\n\[email protected]('/about')\ndef about():\n return render_template('about.html')\n\n\nif __name__ == '__main__':\n db.check()\n app.run(host='0.0.0.0', port=8000, debug=True)\n",
"step-5": "from flask import Flask, render_template, request, redirect\n#from gevent.pywsgi import WSGIServer\nimport model as db\nimport sys\nimport time, calendar\nimport jsonify\n\ndef get_date(date):\n date = date.split(\"/\")\n time = str(date[0])\n day_str = calendar.day_name[calendar.weekday(int(date[3]), int(date[2]), int(date[1]))] # .day_abbr[]\n day_num = str(int(date[1]))\n month = calendar.month_name[int(date[2])]\n year = str(date[3])\n if int(day_num) == 1:\n day_num = \"1st \"\n elif int(day_num) == 2:\n day_num = \"2nd \"\n elif int(day_num) == 3:\n day_num = \"3rd \"\n else:\n return str(time + \" \" + day_str + \", the \" + day_num + \"th \" + month + \" \" + year)\n\n return str(time + \" \" + day_str + \", the \" + day_num + month + \" \" + year)\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef index():\n post = db.get_last_post()\n post[3] = get_date(post[3])\n return render_template(\"index.html\", last_post=post)\n\[email protected](\"/edit\")\ndef add():\n table = db.get_all_posts()\n return render_template(\"edit.html\", action=\"write\", table=table)\n\[email protected](\"/edit\", methods=[\"POST\"])\ndef edit():\n if request.method == \"POST\":\n title = request.form[\"title\"]\n under_title = request.form[\"under_title\"]\n author = request.form[\"author\"]\n release = time.strftime(\"%H:%M/%-d/%m/%Y\") # HH:MM/dd/mm/yyyy format\n content = request.form[\"content\"]\n if db.add_post(title, under_title, author, release, content):\n print(\"Failed to add post to database!\", file=sys.stderr)\n return render_template(\"add.html\", post=1)\n else: # successfull\n print(\"Successfully added post to database!\", file=sys.stderr)\n return render_template(\"add.html\", post=0)\n\[email protected](\"/edit/d\")\ndef d():\n pass\n\[email protected](\"/posts\")\ndef posts():\n posts = db.get_all_posts()\n for post in posts:\n post[3] = get_date(post[3])\n return render_template(\"posts.html\", posts=posts)\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n# @app.route(\"/register\", methods=[\"POST\"])\n# def get_registration_data():\n# if request.method == \"POST\": # only if website sends sth\n# email = request.form[\"email\"] # get userinput via HTML-form\n# username = request.form[\"username\"]\n# if register_user(username, email): # if sth is wrong with the db\n# print(\"Failed to register!\", file=sys.stderr)\n# return render_template('register.html',\n# action=\"register\",\n# status=\"Failed to register! Please try again!\",\n# status_color=\"#ff0033\")\n# else: # db check successfull\n# print(\"Successfully registered!\", file=sys.stderr)\n# return render_template('register.html',\n# action=\"finish\",\n# status=\"You have been successfully registered!\",\n# status_color=\"#08da94\",\n# username=username)\n\nif __name__ == \"__main__\":\n db.check()\n # development/debugging (flask default):\n app.run(host=\"0.0.0.0\", port=8000, debug=True)\n\n # basic server, ready for real-life usage [http://localhost:8000/]\n #server = WSGIServer(('0.0.0.0', 8000), app)\n #server.serve_forever()\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class TwoPair(RankedHand):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ThreeKind(RankedHand):
def __init__(self, three_kind_rank):
self.rank = 3
self.three_kind_rank = three_kind_rank
def __eq__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__lt__(other)
if self.three_kind_rank < other.three_kind_rank:
return True
elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(
other) == -1:
return True
else:
return False
class Straight(RankedHand):
def __init__(self, all_cards):
super(Straight, self).__init__(all_cards)
self.rank = 4
if 14 in all_cards and 2 in all_cards:
tmp = all_cards
tmp.remove(14)
self.straight_rank = max(tmp)
else:
self.straight_rank = max(all_cards)
def __eq__(self, other):
if self.rank != other.rank:
return super(Straight, self).__eq__(other)
else:
return self.straight_rank == other.straight_rank
def __lt__(self, other):
if self.rank != other.rank:
return super(Straight, self).__lt__(other)
else:
return self.straight_rank < other.straight_rank
class Flush(RankedHand):
def __init__(self, all_cards):
super(Flush, self).__init__(all_cards)
self.rank = 5
def __eq__(self, other):
if self.rank != other.rank:
return super(Flush, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(Flush, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class FullHouse(RankedHand):
def __init__(self, three_kind_rank):
super(FullHouse, self).__init__([])
self.three_kind_rank = three_kind_rank
self.rank = 6
def __eq__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__lt__(other)
elif self.three_kind_rank < other.three_kind_rank:
return True
else:
return False
class FourKind(RankedHand):
def __init__(self, four_kind_rank):
self.four_kind_rank = four_kind_rank
self.rank = 7
def __eq__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__eq__(other)
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__lt__(other)
elif self.four_kind_rank < other.four_kind_rank:
return True
else:
return False
class StraightFlush(Straight):
def __init__(self, all_cards):
super(StraightFlush, self).__init__(all_cards)
self.rank = 8
class RoyalFlush(RankedHand):
def __init__(self):
self.rank = 9
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TwoPair(RankedHand):
def __init__(self, two_pair_ranks, remaining_card):
super(TwoPair, self).__init__(remaining_card)
self.two_pair_ranks = sorted(two_pair_ranks)
self.rank = 2
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __eq__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__eq__(other)
else:
return self.high_pair() == other.high_pair() and self.low_pair(
) == other.low_pair() and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__lt__(other)
if self.high_pair() < other.high_pair():
return True
elif self.high_pair() == other.high_pair() and self.low_pair(
) < other.low_pair():
return True
elif self.high_pair() == other.high_pair() and self.low_pair(
) == other.low_pair() and self.compare_high_cards(other) == -1:
return True
else:
return False
class ThreeKind(RankedHand):
def __init__(self, three_kind_rank):
self.rank = 3
self.three_kind_rank = three_kind_rank
def __eq__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__lt__(other)
if self.three_kind_rank < other.three_kind_rank:
return True
elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(
other) == -1:
return True
else:
return False
class Straight(RankedHand):
def __init__(self, all_cards):
super(Straight, self).__init__(all_cards)
self.rank = 4
if 14 in all_cards and 2 in all_cards:
tmp = all_cards
tmp.remove(14)
self.straight_rank = max(tmp)
else:
self.straight_rank = max(all_cards)
def __eq__(self, other):
if self.rank != other.rank:
return super(Straight, self).__eq__(other)
else:
return self.straight_rank == other.straight_rank
def __lt__(self, other):
if self.rank != other.rank:
return super(Straight, self).__lt__(other)
else:
return self.straight_rank < other.straight_rank
class Flush(RankedHand):
def __init__(self, all_cards):
super(Flush, self).__init__(all_cards)
self.rank = 5
def __eq__(self, other):
if self.rank != other.rank:
return super(Flush, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(Flush, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class FullHouse(RankedHand):
def __init__(self, three_kind_rank):
super(FullHouse, self).__init__([])
self.three_kind_rank = three_kind_rank
self.rank = 6
def __eq__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__lt__(other)
elif self.three_kind_rank < other.three_kind_rank:
return True
else:
return False
class FourKind(RankedHand):
def __init__(self, four_kind_rank):
self.four_kind_rank = four_kind_rank
self.rank = 7
def __eq__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__eq__(other)
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__lt__(other)
elif self.four_kind_rank < other.four_kind_rank:
return True
else:
return False
class StraightFlush(Straight):
def __init__(self, all_cards):
super(StraightFlush, self).__init__(all_cards)
self.rank = 8
class RoyalFlush(RankedHand):
def __init__(self):
self.rank = 9
<|reserved_special_token_1|>
class RankedHand(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def compare_high_cards(self, other):
s_cards = reversed(sorted(self.remaining_cards()))
o_cards = reversed(sorted(other.remaining_cards()))
for card_pair in zip(s_cards, o_cards):
print('Comparing %s and %s' % (str(card_pair[0]), str(card_pair
[1])))
if card_pair[0] > card_pair[1]:
return 1
elif card_pair[0] < card_pair[1]:
return -1
return 0
def __eq__(self, other):
return self.rank == other.rank
def __lt__(self, other):
return self.rank < other.rank
class HighCard(RankedHand):
def __init__(self, remaining_cards):
super(HighCard, self).__init__(remaining_cards)
self.rank = 0
def __eq__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class OnePair(RankedHand):
def __init__(self, pair_cards, remaining_cards):
super(OnePair, self).__init__(remaining_cards)
self.rank = 1
self.pair_cards = pair_cards
def __eq__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__eq__(other)
else:
return (self.pair_cards == other.pair_cards and self.
compare_high_cards(other) == 0)
def __lt__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__lt__(other)
else:
return (self.pair_cards < other.pair_cards or self.pair_cards ==
other.pair_cards and self.compare_high_cards(other) == -1)
class TwoPair(RankedHand):
def __init__(self, two_pair_ranks, remaining_card):
super(TwoPair, self).__init__(remaining_card)
self.two_pair_ranks = sorted(two_pair_ranks)
self.rank = 2
def high_pair(self):
return self.two_pair_ranks[1]
def low_pair(self):
return self.two_pair_ranks[0]
def __eq__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__eq__(other)
else:
return self.high_pair() == other.high_pair() and self.low_pair(
) == other.low_pair() and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__lt__(other)
if self.high_pair() < other.high_pair():
return True
elif self.high_pair() == other.high_pair() and self.low_pair(
) < other.low_pair():
return True
elif self.high_pair() == other.high_pair() and self.low_pair(
) == other.low_pair() and self.compare_high_cards(other) == -1:
return True
else:
return False
class ThreeKind(RankedHand):
def __init__(self, three_kind_rank):
self.rank = 3
self.three_kind_rank = three_kind_rank
def __eq__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__lt__(other)
if self.three_kind_rank < other.three_kind_rank:
return True
elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(
other) == -1:
return True
else:
return False
class Straight(RankedHand):
def __init__(self, all_cards):
super(Straight, self).__init__(all_cards)
self.rank = 4
if 14 in all_cards and 2 in all_cards:
tmp = all_cards
tmp.remove(14)
self.straight_rank = max(tmp)
else:
self.straight_rank = max(all_cards)
def __eq__(self, other):
if self.rank != other.rank:
return super(Straight, self).__eq__(other)
else:
return self.straight_rank == other.straight_rank
def __lt__(self, other):
if self.rank != other.rank:
return super(Straight, self).__lt__(other)
else:
return self.straight_rank < other.straight_rank
class Flush(RankedHand):
def __init__(self, all_cards):
super(Flush, self).__init__(all_cards)
self.rank = 5
def __eq__(self, other):
if self.rank != other.rank:
return super(Flush, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(Flush, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class FullHouse(RankedHand):
def __init__(self, three_kind_rank):
super(FullHouse, self).__init__([])
self.three_kind_rank = three_kind_rank
self.rank = 6
def __eq__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__lt__(other)
elif self.three_kind_rank < other.three_kind_rank:
return True
else:
return False
class FourKind(RankedHand):
def __init__(self, four_kind_rank):
self.four_kind_rank = four_kind_rank
self.rank = 7
def __eq__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__eq__(other)
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__lt__(other)
elif self.four_kind_rank < other.four_kind_rank:
return True
else:
return False
class StraightFlush(Straight):
def __init__(self, all_cards):
super(StraightFlush, self).__init__(all_cards)
self.rank = 8
class RoyalFlush(RankedHand):
def __init__(self):
self.rank = 9
<|reserved_special_token_1|>
class RankedHand(object):
def __init__(self, remaining_cards):
self._remaining_cards = remaining_cards
self.rank = None
<|reserved_special_token_0|>
def compare_high_cards(self, other):
s_cards = reversed(sorted(self.remaining_cards()))
o_cards = reversed(sorted(other.remaining_cards()))
for card_pair in zip(s_cards, o_cards):
print('Comparing %s and %s' % (str(card_pair[0]), str(card_pair
[1])))
if card_pair[0] > card_pair[1]:
return 1
elif card_pair[0] < card_pair[1]:
return -1
return 0
def __eq__(self, other):
return self.rank == other.rank
def __lt__(self, other):
return self.rank < other.rank
class HighCard(RankedHand):
def __init__(self, remaining_cards):
super(HighCard, self).__init__(remaining_cards)
self.rank = 0
def __eq__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class OnePair(RankedHand):
def __init__(self, pair_cards, remaining_cards):
super(OnePair, self).__init__(remaining_cards)
self.rank = 1
self.pair_cards = pair_cards
def __eq__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__eq__(other)
else:
return (self.pair_cards == other.pair_cards and self.
compare_high_cards(other) == 0)
def __lt__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__lt__(other)
else:
return (self.pair_cards < other.pair_cards or self.pair_cards ==
other.pair_cards and self.compare_high_cards(other) == -1)
class TwoPair(RankedHand):
def __init__(self, two_pair_ranks, remaining_card):
super(TwoPair, self).__init__(remaining_card)
self.two_pair_ranks = sorted(two_pair_ranks)
self.rank = 2
def high_pair(self):
return self.two_pair_ranks[1]
def low_pair(self):
return self.two_pair_ranks[0]
def __eq__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__eq__(other)
else:
return self.high_pair() == other.high_pair() and self.low_pair(
) == other.low_pair() and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__lt__(other)
if self.high_pair() < other.high_pair():
return True
elif self.high_pair() == other.high_pair() and self.low_pair(
) < other.low_pair():
return True
elif self.high_pair() == other.high_pair() and self.low_pair(
) == other.low_pair() and self.compare_high_cards(other) == -1:
return True
else:
return False
class ThreeKind(RankedHand):
def __init__(self, three_kind_rank):
self.rank = 3
self.three_kind_rank = three_kind_rank
def __eq__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__lt__(other)
if self.three_kind_rank < other.three_kind_rank:
return True
elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(
other) == -1:
return True
else:
return False
class Straight(RankedHand):
def __init__(self, all_cards):
super(Straight, self).__init__(all_cards)
self.rank = 4
if 14 in all_cards and 2 in all_cards:
tmp = all_cards
tmp.remove(14)
self.straight_rank = max(tmp)
else:
self.straight_rank = max(all_cards)
def __eq__(self, other):
if self.rank != other.rank:
return super(Straight, self).__eq__(other)
else:
return self.straight_rank == other.straight_rank
def __lt__(self, other):
if self.rank != other.rank:
return super(Straight, self).__lt__(other)
else:
return self.straight_rank < other.straight_rank
class Flush(RankedHand):
def __init__(self, all_cards):
super(Flush, self).__init__(all_cards)
self.rank = 5
def __eq__(self, other):
if self.rank != other.rank:
return super(Flush, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(Flush, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class FullHouse(RankedHand):
def __init__(self, three_kind_rank):
super(FullHouse, self).__init__([])
self.three_kind_rank = three_kind_rank
self.rank = 6
def __eq__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__eq__(other)
else:
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__lt__(other)
elif self.three_kind_rank < other.three_kind_rank:
return True
else:
return False
class FourKind(RankedHand):
def __init__(self, four_kind_rank):
self.four_kind_rank = four_kind_rank
self.rank = 7
def __eq__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__eq__(other)
return False
def __lt__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__lt__(other)
elif self.four_kind_rank < other.four_kind_rank:
return True
else:
return False
class StraightFlush(Straight):
def __init__(self, all_cards):
super(StraightFlush, self).__init__(all_cards)
self.rank = 8
class RoyalFlush(RankedHand):
def __init__(self):
self.rank = 9
<|reserved_special_token_1|>
class RankedHand(object):
def __init__(self, remaining_cards):
self._remaining_cards = remaining_cards
self.rank = None
def remaining_cards(self):
return self._remaining_cards
# Returns 1 if self is higher, 0 if equal, -1 if self is lower
def compare_high_cards(self, other):
s_cards = reversed(sorted(self.remaining_cards()))
o_cards = reversed(sorted(other.remaining_cards()))
for card_pair in zip(s_cards, o_cards):
print("Comparing %s and %s" % (str(card_pair[0]), str(card_pair[1])))
if(card_pair[0] > card_pair[1]):
return 1
elif(card_pair[0] < card_pair[1]):
return -1
return 0
def __eq__(self, other):
return self.rank == other.rank
def __lt__(self, other):
return self.rank < other.rank
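# Design note (added commentary): every subclass below follows one pattern --
# when self.rank differs from other.rank the category decides (each method
# defers back to RankedHand via super()), otherwise the hand breaks the tie
# internally. reversed(sorted(...)) in compare_high_cards walks the kickers
# high-to-low, so e.g. [14, 9, 7] beats [14, 9, 6] only on the third card.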
class HighCard(RankedHand):
def __init__(self, remaining_cards):
super(HighCard, self).__init__(remaining_cards)
self.rank = 0
def __eq__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(HighCard, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class OnePair(RankedHand):
def __init__(self, pair_cards, remaining_cards):
super(OnePair, self).__init__(remaining_cards)
self.rank = 1
self.pair_cards = pair_cards
def __eq__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__eq__(other)
else:
return self.pair_cards == other.pair_cards and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(OnePair, self).__lt__(other)
else:
return self.pair_cards < other.pair_cards or (self.pair_cards == other.pair_cards and self.compare_high_cards(other) == -1)
class TwoPair(RankedHand):
def __init__(self, two_pair_ranks, remaining_card):
super(TwoPair, self).__init__(remaining_card)
self.two_pair_ranks = sorted(two_pair_ranks)
self.rank = 2
def high_pair(self):
return self.two_pair_ranks[1]
def low_pair(self):
return self.two_pair_ranks[0]
def __eq__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__eq__(other)
else:
return self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(TwoPair, self).__lt__(other)
if self.high_pair() < other.high_pair():
return True
elif(self.high_pair() == other.high_pair() and self.low_pair() < other.low_pair()):
return True
elif(self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == -1):
return True
else:
return False
class ThreeKind(RankedHand):
    def __init__(self, three_kind_rank, remaining_cards=None):
        # super() call added: __lt__ below tie-breaks via compare_high_cards(),
        # which needs _remaining_cards to exist (kickers default to empty)
        super(ThreeKind, self).__init__(remaining_cards or [])
        self.rank = 3
        self.three_kind_rank = three_kind_rank
def __eq__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__eq__(other)
else:
return False # Can't be equal
def __lt__(self, other):
if self.rank != other.rank:
return super(ThreeKind, self).__lt__(other)
if self.three_kind_rank < other.three_kind_rank:
return True
elif(self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(other) == -1):
return True
else:
return False
class Straight(RankedHand):
def __init__(self, all_cards):
super(Straight, self).__init__(all_cards)
self.rank = 4
# Account for Ace low
if 14 in all_cards and 2 in all_cards:
            tmp = list(all_cards)   # copy, so the caller's hand isn't mutated
            tmp.remove(14)
self.straight_rank = max(tmp)
else:
self.straight_rank = max(all_cards)
def __eq__(self, other):
if self.rank != other.rank:
return super(Straight, self).__eq__(other)
else:
return self.straight_rank == other.straight_rank
def __lt__(self, other):
if self.rank != other.rank:
return super(Straight, self).__lt__(other)
else:
return self.straight_rank < other.straight_rank
class Flush(RankedHand):
def __init__(self, all_cards):
super(Flush, self).__init__(all_cards)
self.rank = 5
def __eq__(self, other):
if self.rank != other.rank:
return super(Flush, self).__eq__(other)
else:
return self.compare_high_cards(other) == 0
def __lt__(self, other):
if self.rank != other.rank:
return super(Flush, self).__lt__(other)
else:
return self.compare_high_cards(other) == -1
class FullHouse(RankedHand):
def __init__(self, three_kind_rank):
super(FullHouse, self).__init__([])
self.three_kind_rank = three_kind_rank
self.rank = 6
def __eq__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__eq__(other)
else:
return False # Can't be equal
def __lt__(self, other):
if self.rank != other.rank:
return super(FullHouse, self).__lt__(other)
elif(self.three_kind_rank < other.three_kind_rank):
return True
else:
return False
class FourKind(RankedHand):
    def __init__(self, four_kind_rank):
        super(FourKind, self).__init__([])  # keep the RankedHand invariant
        self.four_kind_rank = four_kind_rank
        self.rank = 7
def __eq__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__eq__(other)
return False # Can't be equal
def __lt__(self, other):
if self.rank != other.rank:
return super(FourKind, self).__lt__(other)
elif(self.four_kind_rank < other.four_kind_rank):
return True
else:
return False
class StraightFlush(Straight):
def __init__(self, all_cards):
super(StraightFlush, self).__init__(all_cards)
self.rank = 8
class RoyalFlush(RankedHand):
    def __init__(self):
        super(RoyalFlush, self).__init__([])
        self.rank = 9
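# Minimal usage sketch (illustrative, not part of the original module).
# Assumes card values are plain ints with 11=J, 12=Q, 13=K, 14=A, which is
# what the ace-low check in Straight implies.
if __name__ == "__main__":
    assert OnePair(10, [14, 7, 3]) < TwoPair([4, 9], [12])      # rank 1 < rank 2
    assert Straight([14, 2, 3, 4, 5]).straight_rank == 5        # ace-low wheel
    assert ThreeKind(8, [14, 9]) < ThreeKind(8, [14, 10])       # kicker tie-break
    assert Flush([2, 5, 9, 11, 13]) < FourKind(8)               # rank 5 < rank 7
    print("all comparisons behave as expected")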
|
flexible
|
{
"blob_id": "a0d1ef11d00e2ddd65b648a87f493b7adcda5115",
"index": 9412,
"step-1": "<mask token>\n\n\nclass TwoPair(RankedHand):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-2": "<mask token>\n\n\nclass TwoPair(RankedHand):\n\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n <mask token>\n <mask token>\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) < other.low_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == -1:\n return True\n else:\n return False\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, 
self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-3": "class RankedHand(object):\n <mask token>\n <mask token>\n\n def compare_high_cards(self, other):\n s_cards = reversed(sorted(self.remaining_cards()))\n o_cards = reversed(sorted(other.remaining_cards()))\n for card_pair in zip(s_cards, o_cards):\n print('Comparing %s and %s' % (str(card_pair[0]), str(card_pair\n [1])))\n if card_pair[0] > card_pair[1]:\n return 1\n elif card_pair[0] < card_pair[1]:\n return -1\n return 0\n\n def __eq__(self, other):\n return self.rank == other.rank\n\n def __lt__(self, other):\n return self.rank < other.rank\n\n\nclass HighCard(RankedHand):\n\n def __init__(self, remaining_cards):\n super(HighCard, self).__init__(remaining_cards)\n self.rank = 0\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass OnePair(RankedHand):\n\n def __init__(self, pair_cards, remaining_cards):\n super(OnePair, self).__init__(remaining_cards)\n self.rank = 1\n self.pair_cards = pair_cards\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__eq__(other)\n else:\n return (self.pair_cards == other.pair_cards and self.\n compare_high_cards(other) == 0)\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__lt__(other)\n else:\n return (self.pair_cards < other.pair_cards or self.pair_cards ==\n other.pair_cards and self.compare_high_cards(other) == -1)\n\n\nclass TwoPair(RankedHand):\n\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n\n def high_pair(self):\n return self.two_pair_ranks[1]\n\n def low_pair(self):\n return self.two_pair_ranks[0]\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) < other.low_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == -1:\n return True\n else:\n return False\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, 
self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-4": "class RankedHand(object):\n\n def __init__(self, remaining_cards):\n self._remaining_cards = remaining_cards\n self.rank = None\n <mask token>\n\n def compare_high_cards(self, other):\n s_cards = reversed(sorted(self.remaining_cards()))\n o_cards = reversed(sorted(other.remaining_cards()))\n for card_pair in zip(s_cards, o_cards):\n print('Comparing %s and %s' % (str(card_pair[0]), str(card_pair\n [1])))\n if card_pair[0] > card_pair[1]:\n return 1\n elif card_pair[0] < card_pair[1]:\n return -1\n return 0\n\n def __eq__(self, other):\n return self.rank == other.rank\n\n def __lt__(self, other):\n return self.rank < other.rank\n\n\nclass HighCard(RankedHand):\n\n def __init__(self, remaining_cards):\n super(HighCard, self).__init__(remaining_cards)\n self.rank = 0\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass OnePair(RankedHand):\n\n def __init__(self, pair_cards, remaining_cards):\n super(OnePair, self).__init__(remaining_cards)\n self.rank = 1\n self.pair_cards = pair_cards\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__eq__(other)\n else:\n return (self.pair_cards == other.pair_cards and self.\n compare_high_cards(other) == 0)\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__lt__(other)\n else:\n return (self.pair_cards < other.pair_cards or self.pair_cards ==\n other.pair_cards and self.compare_high_cards(other) == -1)\n\n\nclass TwoPair(RankedHand):\n\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n\n def high_pair(self):\n return self.two_pair_ranks[1]\n\n def low_pair(self):\n return self.two_pair_ranks[0]\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) < other.low_pair():\n return True\n elif self.high_pair() == other.high_pair() and self.low_pair(\n ) == other.low_pair() and self.compare_high_cards(other) == -1:\n return True\n else:\n return False\n\n\nclass ThreeKind(RankedHand):\n\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(\n other) == -1:\n return True\n else:\n return False\n\n\nclass Straight(RankedHand):\n\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n if 14 in all_cards and 2 in all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n 
def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\n\nclass Flush(RankedHand):\n\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\n\nclass FullHouse(RankedHand):\n\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif self.three_kind_rank < other.three_kind_rank:\n return True\n else:\n return False\n\n\nclass FourKind(RankedHand):\n\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif self.four_kind_rank < other.four_kind_rank:\n return True\n else:\n return False\n\n\nclass StraightFlush(Straight):\n\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\n\nclass RoyalFlush(RankedHand):\n\n def __init__(self):\n self.rank = 9\n",
"step-5": "class RankedHand(object):\n def __init__(self, remaining_cards):\n self._remaining_cards = remaining_cards\n self.rank = None\n\n def remaining_cards(self):\n return self._remaining_cards\n\n # Returns 1 if self is higher, 0 if equal, -1 if self is lower\n def compare_high_cards(self, other):\n s_cards = reversed(sorted(self.remaining_cards()))\n o_cards = reversed(sorted(other.remaining_cards()))\n for card_pair in zip(s_cards, o_cards):\n print(\"Comparing %s and %s\" % (str(card_pair[0]), str(card_pair[1])))\n if(card_pair[0] > card_pair[1]):\n return 1\n elif(card_pair[0] < card_pair[1]):\n return -1\n return 0\n\n def __eq__(self, other):\n return self.rank == other.rank\n\n def __lt__(self, other):\n return self.rank < other.rank\n\nclass HighCard(RankedHand):\n def __init__(self, remaining_cards):\n super(HighCard, self).__init__(remaining_cards)\n self.rank = 0\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(HighCard, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\nclass OnePair(RankedHand):\n def __init__(self, pair_cards, remaining_cards):\n super(OnePair, self).__init__(remaining_cards)\n self.rank = 1\n self.pair_cards = pair_cards\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__eq__(other)\n else:\n return self.pair_cards == other.pair_cards and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(OnePair, self).__lt__(other)\n else:\n return self.pair_cards < other.pair_cards or (self.pair_cards == other.pair_cards and self.compare_high_cards(other) == -1)\n\nclass TwoPair(RankedHand):\n def __init__(self, two_pair_ranks, remaining_card):\n super(TwoPair, self).__init__(remaining_card)\n self.two_pair_ranks = sorted(two_pair_ranks)\n self.rank = 2\n\n def high_pair(self):\n return self.two_pair_ranks[1]\n\n def low_pair(self):\n return self.two_pair_ranks[0]\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__eq__(other)\n else:\n return self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(TwoPair, self).__lt__(other)\n if self.high_pair() < other.high_pair():\n return True\n elif(self.high_pair() == other.high_pair() and self.low_pair() < other.low_pair()):\n return True\n elif(self.high_pair() == other.high_pair() and self.low_pair() == other.low_pair() and self.compare_high_cards(other) == -1):\n return True\n else:\n return False\n\nclass ThreeKind(RankedHand):\n def __init__(self, three_kind_rank):\n self.rank = 3\n self.three_kind_rank = three_kind_rank\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__eq__(other)\n else:\n return False # Can't be equal\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(ThreeKind, self).__lt__(other)\n if self.three_kind_rank < other.three_kind_rank:\n return True\n elif(self.three_kind_rank == other.three_kind_rank and self.compare_high_cards(other) == -1):\n return True\n else:\n return False\n\nclass Straight(RankedHand):\n def __init__(self, all_cards):\n super(Straight, self).__init__(all_cards)\n self.rank = 4\n # Account for Ace low\n if 14 in all_cards and 2 in 
all_cards:\n tmp = all_cards\n tmp.remove(14)\n self.straight_rank = max(tmp)\n else:\n self.straight_rank = max(all_cards)\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__eq__(other)\n else:\n return self.straight_rank == other.straight_rank\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Straight, self).__lt__(other)\n else:\n return self.straight_rank < other.straight_rank\n\nclass Flush(RankedHand):\n def __init__(self, all_cards):\n super(Flush, self).__init__(all_cards)\n self.rank = 5\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__eq__(other)\n else:\n return self.compare_high_cards(other) == 0\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(Flush, self).__lt__(other)\n else:\n return self.compare_high_cards(other) == -1\n\nclass FullHouse(RankedHand):\n def __init__(self, three_kind_rank):\n super(FullHouse, self).__init__([])\n self.three_kind_rank = three_kind_rank\n self.rank = 6\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__eq__(other)\n else:\n return False # Can't be equal\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FullHouse, self).__lt__(other)\n elif(self.three_kind_rank < other.three_kind_rank):\n return True\n else:\n return False\n\nclass FourKind(RankedHand):\n def __init__(self, four_kind_rank):\n self.four_kind_rank = four_kind_rank\n self.rank = 7\n\n def __eq__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__eq__(other)\n return False # Can't be equal\n\n def __lt__(self, other):\n if self.rank != other.rank:\n return super(FourKind, self).__lt__(other)\n elif(self.four_kind_rank < other.four_kind_rank):\n return True\n else:\n return False\n\nclass StraightFlush(Straight):\n def __init__(self, all_cards):\n super(StraightFlush, self).__init__(all_cards)\n self.rank = 8\n\nclass RoyalFlush(RankedHand):\n def __init__(self):\n self.rank = 9\n\n\n\n\n",
"step-ids": [
25,
28,
42,
43,
45
]
}
|
[
25,
28,
42,
43,
45
] |
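The poker-hand record above breaks rank ties with a high-card ("kicker") walk; here is a minimal, self-contained sketch of that comparison idea. compare_kickers and the sample rank lists are illustrative, not names or data from the record.

# Standalone sketch of the kicker comparison the hand classes above rely on.
def compare_kickers(cards_a, cards_b):
    """Return 1, 0, or -1 by comparing card ranks from highest to lowest."""
    for a, b in zip(sorted(cards_a, reverse=True), sorted(cards_b, reverse=True)):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0

if __name__ == '__main__':
    print(compare_kickers([14, 9, 5], [14, 8, 7]))   # 1: aces tie, 9 beats 8
    print(compare_kickers([10, 6, 2], [10, 6, 2]))   # 0: identical kickers
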
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
from sympy import sympify, Symbol
curr_pos = 0
def bisection(st,maxnum,maxer,xlf,xuf):
    # note: this "Hello World" write is discarded right away -- reopening in "w" mode truncates the file
    file2 = open("test.txt","w")
    file2.write("Hello World")
    file2.close()
    fi = open("test.txt", "w")
x=sp.Symbol('x')
y=sp.Symbol('y')
H = sympify(st)
print(H)
table = []
x1=[]
y1=[]
xu=[]
xl=[]
xks=[]
ys=[]
errors=[]
plots=[]
print(float(H.subs(x,0)))
ys.append(float(H.subs(x,xuf)))
ys.append(float(H.subs(x,xlf)))
i=0.0
err=1
maxsize=maxnum
print(maxnum)
for i in range(0, maxsize, 1):
xl.append(xlf)
xu.append(xuf)
print('xl ='+ str(xlf))
print('xu ='+ str(xuf))
if(err<=maxer):
break
xk=xlf+xuf
xk=xk/2
print('xk ='+ str(xk))
x2=[xk,xk]
y2=[-100,100]
plots.append((x2,y2))
xks.append(xk)
if i==0:
errors.append(1.0)
print(i)
else:
err=abs((xks[i]-xks[i-1]))
print(str((xks[i]-xks[i-1])))
errors.append(err)
f=float(H.subs(x,xk))
print("fk ="+str(f))
f2=float(H.subs(x,xlf))
print("fl ="+str(f2))
f3=f*f2
ys.append(f)
print (xl[0],xu[0])
print(f)
table.append([xuf,xlf,xk])
if f3<0:
xuf=xk
else:
xlf=xk
i=min([xl[0],xu[0]])
add=(abs((xu[0])-(xl[0]))/100)
print ("min = "+str(i)+" add = "+str(add)+ "max = "+str(max([xl[0],xu[0]])))
while i <= max([xl[0],xu[0]]):
x1.append(i)
print("x="+str(i)+ " y = "+str(float(H.subs(x,i))))
y1.append(float(H.subs(x,i)))
i=i+add
teams_list = ["Xu", "Xl", "Xr"]
row_format ="{:>15}" * (len(teams_list) + 1)
fi.write(row_format.format("", *teams_list))
print (row_format.format("", *teams_list))
for row in table:
print (row_format.format("", *row))
fi.write(row_format.format("", *row))
fi.close()
def key_event(e):
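        # left/right arrow keys step through the saved iterations, redrawing the bracket lines and the title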
global curr_pos
if e.key == "right":
curr_pos = curr_pos + 1
elif e.key == "left":
curr_pos = curr_pos - 1
else:
return
curr_pos = curr_pos % len(plots)
axes = plt.gca()
ax.cla()
axes.set_xlim([xl[0],xu[0]])
axes.set_ylim([min(ys),max(ys)])
ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')
plt.title("Iteration "+str(curr_pos+1)+" xr= "+str(xks[curr_pos])+" errors= "+str(errors[curr_pos]*100)+"%")
fig.canvas.draw()
plots2 = [(x1,y1)]
curr_pos = 0
print(xl)
fig = plt.figure()
axes = plt.gca()
axes.set_xlim([xl[0],xu[0]])
axes.set_ylim([min(ys),max(ys)])
fig.canvas.mpl_connect('key_press_event', key_event)
ax = fig.add_subplot(111)
plt.title("Iteration "+str(curr_pos+1)+" xr= "+str(xks[curr_pos])+" errors= "+str(errors[curr_pos]*100)+"%")
ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')
plt.show()
bisection('(3/2)*(x)-6-(1/2)*sin(2*x)',50,1*10**-3,4,5)
|
normal
|
{
"blob_id": "a1c1f18e7b95f36a214a1a16f2434be2825829c3",
"index": 3110,
"step-1": "<mask token>\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)', 50, 1 * 10 ** -3, 4, 5)\n",
"step-3": "<mask token>\ncurr_pos = 0\n<mask token>\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)', 50, 1 * 10 ** -3, 4, 5)\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sp\nimport matplotlib.pyplot as plt\nfrom sympy import sympify, Symbol\ncurr_pos = 0\nimport numpy as np\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)', 50, 1 * 10 ** -3, 4, 5)\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sp\nimport matplotlib.pyplot as plt\nfrom sympy import sympify, Symbol\ncurr_pos = 0\nimport numpy as np\n\ndef bisection(st,maxnum,maxer,xlf,xuf):\n file2 = open(\"test.txt\",\"w\") \n file2.write(\"Hello World\") \n file2.close() \n fi = open(\"test.txt\", \"w\")\n x=sp.Symbol('x')\n y=sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1=[]\n y1=[]\n xu=[]\n xl=[]\n xks=[]\n ys=[]\n errors=[]\n plots=[]\n print(float(H.subs(x,0)))\n ys.append(float(H.subs(x,xuf)))\n ys.append(float(H.subs(x,xlf)))\n i=0.0\n err=1\n maxsize=maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl ='+ str(xlf))\n print('xu ='+ str(xuf))\n if(err<=maxer):\n break\n xk=xlf+xuf\n xk=xk/2\n print('xk ='+ str(xk))\n x2=[xk,xk]\n y2=[-100,100]\n plots.append((x2,y2))\n xks.append(xk)\n if i==0:\n errors.append(1.0)\n print(i)\n else:\n err=abs((xks[i]-xks[i-1]))\n print(str((xks[i]-xks[i-1])))\n errors.append(err)\n f=float(H.subs(x,xk))\n print(\"fk =\"+str(f))\n f2=float(H.subs(x,xlf))\n print(\"fl =\"+str(f2))\n f3=f*f2\n ys.append(f)\n print (xl[0],xu[0])\n print(f)\n table.append([xuf,xlf,xk])\n if f3<0:\n xuf=xk\n else:\n xlf=xk \n i=min([xl[0],xu[0]])\n add=(abs((xu[0])-(xl[0]))/100)\n print (\"min = \"+str(i)+\" add = \"+str(add)+ \"max = \"+str(max([xl[0],xu[0]])))\n while i <= max([xl[0],xu[0]]):\n x1.append(i)\n print(\"x=\"+str(i)+ \" y = \"+str(float(H.subs(x,i))))\n y1.append(float(H.subs(x,i)))\n i=i+add\n teams_list = [\"Xu\", \"Xl\", \"Xr\"]\n row_format =\"{:>15}\" * (len(teams_list) + 1)\n fi.write(row_format.format(\"\", *teams_list))\n print (row_format.format(\"\", *teams_list))\n for row in table:\n print (row_format.format(\"\", *row))\n fi.write(row_format.format(\"\", *row))\n fi.close()\n def key_event(e):\n global curr_pos\n\n if e.key == \"right\":\n curr_pos = curr_pos + 1\n elif e.key == \"left\":\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0],xu[0]])\n axes.set_ylim([min(ys),max(ys)])\n ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')\n plt.title(\"Iteration \"+str(curr_pos+1)+\" xr= \"+str(xks[curr_pos])+\" errors= \"+str(errors[curr_pos]*100)+\"%\")\n fig.canvas.draw() \n plots2 = [(x1,y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0],xu[0]])\n axes.set_ylim([min(ys),max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title(\"Iteration \"+str(curr_pos+1)+\" xr= \"+str(xks[curr_pos])+\" errors= \"+str(errors[curr_pos]*100)+\"%\")\n ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')\n plt.show()\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)',50,1*10**-3,4,5)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
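The record above wraps bisection in plotting and file I/O; here is a minimal sketch of just the numerical core, assuming only a callable f with a sign change on [lo, hi]. The name bisect is illustrative, though the tol/max_iter defaults mirror the record's maxer/maxnum arguments.

# Minimal bisection root-finder, stripped of the plotting and file I/O above.
import math

def bisect(f, lo, hi, tol=1e-3, max_iter=50):
    """Halve [lo, hi] until the midpoint moves by less than tol."""
    assert f(lo) * f(hi) < 0, "f must change sign on [lo, hi]"
    mid = lo
    for _ in range(max_iter):
        prev, mid = mid, (lo + hi) / 2
        if abs(mid - prev) <= tol:
            break
        if f(lo) * f(mid) < 0:
            hi = mid
        else:
            lo = mid
    return mid

if __name__ == '__main__':
    # same test function and bracket as the record: (3/2)x - 6 - (1/2)sin(2x) on [4, 5]
    print(bisect(lambda x: 1.5 * x - 6 - 0.5 * math.sin(2 * x), 4, 5))
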
<|reserved_special_token_0|>
class Renderer:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_width(self):
return self.width
def draw_board(self):
for i in range(0, 6):
horLines = Line(Point(0, i * self.height / 6), Point(self.width,
i * self.height / 6))
horLines.setOutline('black')
horLines.draw(self.win)
for j in range(0, 7):
verLines = Line(Point(j * self.width / 7, 0), Point(j * self.
width / 7, self.height))
verLines.setOutline('black')
verLines.draw(self.win)
for y in range(0, 6):
for x in range(0, 7):
slot = Circle(Point(x * self.width / 7 + 50, y * self.
height / 6 + 50), 37.5)
slot.setFill('white')
slot.draw(self.win)
<|reserved_special_token_0|>
def end(self):
self.get_window().close()
class Menu:
def __init__(self, window):
self.window = window
skyBlue = color_rgb(135, 206, 250)
royalBlue = color_rgb(65, 105, 225)
self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,
0.8 * 500))
self.menu.setFill(skyBlue)
self.menu.setOutline(skyBlue)
self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 *
500, 0.35 * 500))
self.save.setOutline(royalBlue)
self.save.setFill(royalBlue)
self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')
self.saveTxt.setSize(30)
self.saveTxt.setFace('helvetica')
self.saveTxt.setStyle('bold')
self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 *
500, 0.55 * 500))
self.load.setOutline(royalBlue)
self.load.setFill(royalBlue)
self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')
self.loadTxt.setSize(30)
self.loadTxt.setFace('helvetica')
self.loadTxt.setStyle('bold')
self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 *
500, 0.75 * 500))
self.quit.setOutline(royalBlue)
self.quit.setFill(royalBlue)
self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')
self.quitTxt.setSize(30)
self.quitTxt.setFace('helvetica')
self.quitTxt.setStyle('bold')
def openMenu(self):
self.menu.draw(self.window)
self.save.draw(self.window)
self.saveTxt.draw(self.window)
self.load.draw(self.window)
self.loadTxt.draw(self.window)
self.quit.draw(self.window)
self.quitTxt.draw(self.window)
def closeMenu(self):
self.menu.undraw()
self.save.undraw()
self.saveTxt.undraw()
self.load.undraw()
self.loadTxt.undraw()
self.quit.undraw()
self.quitTxt.undraw()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Renderer:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_width(self):
return self.width
def draw_board(self):
for i in range(0, 6):
horLines = Line(Point(0, i * self.height / 6), Point(self.width,
i * self.height / 6))
horLines.setOutline('black')
horLines.draw(self.win)
for j in range(0, 7):
verLines = Line(Point(j * self.width / 7, 0), Point(j * self.
width / 7, self.height))
verLines.setOutline('black')
verLines.draw(self.win)
for y in range(0, 6):
for x in range(0, 7):
slot = Circle(Point(x * self.width / 7 + 50, y * self.
height / 6 + 50), 37.5)
slot.setFill('white')
slot.draw(self.win)
def update_pieces(self, x, y, color):
board = self.engine.get_board()
pointY = y * self.height / 6
pointX = x * self.width / 7
piece = Circle(Point(pointX + 50, pointY + 50), 37.5)
if color == 'r':
piece.setFill('red')
else:
piece.setFill('black')
piece.draw(self.win)
def end(self):
self.get_window().close()
class Menu:
def __init__(self, window):
self.window = window
skyBlue = color_rgb(135, 206, 250)
royalBlue = color_rgb(65, 105, 225)
self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,
0.8 * 500))
self.menu.setFill(skyBlue)
self.menu.setOutline(skyBlue)
self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 *
500, 0.35 * 500))
self.save.setOutline(royalBlue)
self.save.setFill(royalBlue)
self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')
self.saveTxt.setSize(30)
self.saveTxt.setFace('helvetica')
self.saveTxt.setStyle('bold')
self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 *
500, 0.55 * 500))
self.load.setOutline(royalBlue)
self.load.setFill(royalBlue)
self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')
self.loadTxt.setSize(30)
self.loadTxt.setFace('helvetica')
self.loadTxt.setStyle('bold')
self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 *
500, 0.75 * 500))
self.quit.setOutline(royalBlue)
self.quit.setFill(royalBlue)
self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')
self.quitTxt.setSize(30)
self.quitTxt.setFace('helvetica')
self.quitTxt.setStyle('bold')
def openMenu(self):
self.menu.draw(self.window)
self.save.draw(self.window)
self.saveTxt.draw(self.window)
self.load.draw(self.window)
self.loadTxt.draw(self.window)
self.quit.draw(self.window)
self.quitTxt.draw(self.window)
def closeMenu(self):
self.menu.undraw()
self.save.undraw()
self.saveTxt.undraw()
self.load.undraw()
self.loadTxt.undraw()
self.quit.undraw()
self.quitTxt.undraw()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Renderer:
def __init__(self, engine, width=700, height=600):
self.width = width
self.height = height
self.engine = engine
self.win = GraphWin('Game Board', width, height)
self.win.setBackground('blue')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_width(self):
return self.width
def draw_board(self):
for i in range(0, 6):
horLines = Line(Point(0, i * self.height / 6), Point(self.width,
i * self.height / 6))
horLines.setOutline('black')
horLines.draw(self.win)
for j in range(0, 7):
verLines = Line(Point(j * self.width / 7, 0), Point(j * self.
width / 7, self.height))
verLines.setOutline('black')
verLines.draw(self.win)
for y in range(0, 6):
for x in range(0, 7):
slot = Circle(Point(x * self.width / 7 + 50, y * self.
height / 6 + 50), 37.5)
slot.setFill('white')
slot.draw(self.win)
def update_pieces(self, x, y, color):
board = self.engine.get_board()
pointY = y * self.height / 6
pointX = x * self.width / 7
piece = Circle(Point(pointX + 50, pointY + 50), 37.5)
if color == 'r':
piece.setFill('red')
else:
piece.setFill('black')
piece.draw(self.win)
def end(self):
self.get_window().close()
class Menu:
def __init__(self, window):
self.window = window
skyBlue = color_rgb(135, 206, 250)
royalBlue = color_rgb(65, 105, 225)
self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,
0.8 * 500))
self.menu.setFill(skyBlue)
self.menu.setOutline(skyBlue)
self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 *
500, 0.35 * 500))
self.save.setOutline(royalBlue)
self.save.setFill(royalBlue)
self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')
self.saveTxt.setSize(30)
self.saveTxt.setFace('helvetica')
self.saveTxt.setStyle('bold')
self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 *
500, 0.55 * 500))
self.load.setOutline(royalBlue)
self.load.setFill(royalBlue)
self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')
self.loadTxt.setSize(30)
self.loadTxt.setFace('helvetica')
self.loadTxt.setStyle('bold')
self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 *
500, 0.75 * 500))
self.quit.setOutline(royalBlue)
self.quit.setFill(royalBlue)
self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')
self.quitTxt.setSize(30)
self.quitTxt.setFace('helvetica')
self.quitTxt.setStyle('bold')
def openMenu(self):
self.menu.draw(self.window)
self.save.draw(self.window)
self.saveTxt.draw(self.window)
self.load.draw(self.window)
self.loadTxt.draw(self.window)
self.quit.draw(self.window)
self.quitTxt.draw(self.window)
def closeMenu(self):
self.menu.undraw()
self.save.undraw()
self.saveTxt.undraw()
self.load.undraw()
self.loadTxt.undraw()
self.quit.undraw()
self.quitTxt.undraw()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Renderer:
def __init__(self, engine, width=700, height=600):
self.width = width
self.height = height
self.engine = engine
self.win = GraphWin('Game Board', width, height)
self.win.setBackground('blue')
def update(self):
self.win.update()
def get_window(self):
return self.win
def get_width(self):
return self.width
def draw_board(self):
for i in range(0, 6):
horLines = Line(Point(0, i * self.height / 6), Point(self.width,
i * self.height / 6))
horLines.setOutline('black')
horLines.draw(self.win)
for j in range(0, 7):
verLines = Line(Point(j * self.width / 7, 0), Point(j * self.
width / 7, self.height))
verLines.setOutline('black')
verLines.draw(self.win)
for y in range(0, 6):
for x in range(0, 7):
slot = Circle(Point(x * self.width / 7 + 50, y * self.
height / 6 + 50), 37.5)
slot.setFill('white')
slot.draw(self.win)
def update_pieces(self, x, y, color):
board = self.engine.get_board()
pointY = y * self.height / 6
pointX = x * self.width / 7
piece = Circle(Point(pointX + 50, pointY + 50), 37.5)
if color == 'r':
piece.setFill('red')
else:
piece.setFill('black')
piece.draw(self.win)
def end(self):
self.get_window().close()
class Menu:
def __init__(self, window):
self.window = window
skyBlue = color_rgb(135, 206, 250)
royalBlue = color_rgb(65, 105, 225)
self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,
0.8 * 500))
self.menu.setFill(skyBlue)
self.menu.setOutline(skyBlue)
self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 *
500, 0.35 * 500))
self.save.setOutline(royalBlue)
self.save.setFill(royalBlue)
self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')
self.saveTxt.setSize(30)
self.saveTxt.setFace('helvetica')
self.saveTxt.setStyle('bold')
self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 *
500, 0.55 * 500))
self.load.setOutline(royalBlue)
self.load.setFill(royalBlue)
self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')
self.loadTxt.setSize(30)
self.loadTxt.setFace('helvetica')
self.loadTxt.setStyle('bold')
self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 *
500, 0.75 * 500))
self.quit.setOutline(royalBlue)
self.quit.setFill(royalBlue)
self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')
self.quitTxt.setSize(30)
self.quitTxt.setFace('helvetica')
self.quitTxt.setStyle('bold')
def openMenu(self):
self.menu.draw(self.window)
self.save.draw(self.window)
self.saveTxt.draw(self.window)
self.load.draw(self.window)
self.loadTxt.draw(self.window)
self.quit.draw(self.window)
self.quitTxt.draw(self.window)
def closeMenu(self):
self.menu.undraw()
self.save.undraw()
self.saveTxt.undraw()
self.load.undraw()
self.loadTxt.undraw()
self.quit.undraw()
self.quitTxt.undraw()
<|reserved_special_token_1|>
import graphics
from graphics import *
class Renderer():
def __init__(self, engine, width=700, height=600):
self.width = width
self.height = height
self.engine = engine
self.win = GraphWin("Game Board", width, height)
self.win.setBackground("blue")
def update(self):
self.win.update()
def get_window(self):
return(self.win)
def get_width(self):
return self.width
def draw_board(self):
for i in range(0, 6): #Determines size of terrain
horLines = Line(Point(0, i*self.height/6),Point(self.width, i*self.height/6))
horLines.setOutline('black')
horLines.draw(self.win)
for j in range(0, 7):
verLines = Line(Point(j*self.width/7, 0),Point(j*self.width/7, self.height))
verLines.setOutline('black')
verLines.draw(self.win)
for y in range(0,6):
for x in range(0,7):
slot = Circle(Point(x*self.width/7+50,y*self.height/6+50),37.5)
slot.setFill("white")
slot.draw(self.win)
def update_pieces(self,x,y,color):
board = self.engine.get_board()
pointY = y*self.height/6
pointX = x*self.width/7
piece = Circle(Point(pointX+50,pointY+50),37.5)
if color == 'r':
piece.setFill("red")
else:
piece.setFill("black")
piece.draw(self.win)
def end(self):
self.get_window().close()
class Menu(): #CHANGE TO SELF.WIDTH AND HEIGHT
def __init__(self,window):
self.window = window
skyBlue = color_rgb(135,206,250)
royalBlue = color_rgb(65,105,225)
self.menu = Rectangle(Point(.2*500,.15*500),Point(.8*500,.8*500))
self.menu.setFill(skyBlue)
self.menu.setOutline(skyBlue)
self.save = Rectangle(Point(.25*500,.2*500),Point(.75*500,.35*500))
self.save.setOutline(royalBlue)
self.save.setFill(royalBlue)
self.saveTxt = Text(Point(.50*500,.275*500), "SAVE")
self.saveTxt.setSize(30)
self.saveTxt.setFace("helvetica")
self.saveTxt.setStyle("bold")
self.load = Rectangle(Point(.25*500,.4*500),Point(.75*500,.55*500))
self.load.setOutline(royalBlue)
self.load.setFill(royalBlue)
self.loadTxt = Text(Point(.50*500,.475*500), "LOAD")
self.loadTxt.setSize(30)
self.loadTxt.setFace("helvetica")
self.loadTxt.setStyle("bold")
self.quit = Rectangle(Point(.25*500,.6*500),Point(.75*500,.75*500))
self.quit.setOutline(royalBlue)
self.quit.setFill(royalBlue)
self.quitTxt = Text(Point(.50*500,.675*500), "QUIT")
self.quitTxt.setSize(30)
self.quitTxt.setFace("helvetica")
self.quitTxt.setStyle("bold")
def openMenu(self):
self.menu.draw(self.window)
self.save.draw(self.window)
self.saveTxt.draw(self.window)
self.load.draw(self.window)
self.loadTxt.draw(self.window)
self.quit.draw(self.window)
self.quitTxt.draw(self.window)
def closeMenu(self):
self.menu.undraw()
self.save.undraw()
self.saveTxt.undraw()
self.load.undraw()
self.loadTxt.undraw()
self.quit.undraw()
self.quitTxt.undraw()
|
flexible
|
{
"blob_id": "85a3682f144f02aa412d45c901f76c65de2e816d",
"index": 5599,
"step-1": "<mask token>\n\n\nclass Renderer:\n <mask token>\n <mask token>\n <mask token>\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n <mask token>\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-2": "<mask token>\n\n\nclass Renderer:\n <mask token>\n <mask token>\n <mask token>\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n\n def update_pieces(self, x, y, color):\n board = self.engine.get_board()\n pointY = y * self.height / 6\n pointX = x * self.width / 7\n piece = Circle(Point(pointX + 50, pointY + 50), 37.5)\n if color == 'r':\n piece.setFill('red')\n else:\n piece.setFill('black')\n piece.draw(self.win)\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-3": "<mask token>\n\n\nclass Renderer:\n\n def __init__(self, engine, width=700, height=600):\n self.width = width\n self.height = height\n self.engine = engine\n self.win = GraphWin('Game Board', width, height)\n self.win.setBackground('blue')\n <mask token>\n <mask token>\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n\n def update_pieces(self, x, y, color):\n board = self.engine.get_board()\n pointY = y * self.height / 6\n pointX = x * self.width / 7\n piece = Circle(Point(pointX + 50, pointY + 50), 37.5)\n if color == 'r':\n piece.setFill('red')\n else:\n piece.setFill('black')\n piece.draw(self.win)\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-4": "<mask token>\n\n\nclass Renderer:\n\n def __init__(self, engine, width=700, height=600):\n self.width = width\n self.height = height\n self.engine = engine\n self.win = GraphWin('Game Board', width, height)\n self.win.setBackground('blue')\n\n def update(self):\n self.win.update()\n\n def get_window(self):\n return self.win\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n\n def update_pieces(self, x, y, color):\n board = self.engine.get_board()\n pointY = y * self.height / 6\n pointX = x * self.width / 7\n piece = Circle(Point(pointX + 50, pointY + 50), 37.5)\n if color == 'r':\n piece.setFill('red')\n else:\n piece.setFill('black')\n piece.draw(self.win)\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-5": "import graphics \nfrom graphics import *\n\nclass Renderer():\n def __init__(self, engine, width=700, height=600):\n self.width = width\n self.height = height\n self.engine = engine\n self.win = GraphWin(\"Game Board\", width, height)\n self.win.setBackground(\"blue\")\n\n def update(self):\n self.win.update()\n\n\n def get_window(self):\n return(self.win)\n\n\n def get_width(self):\n return self.width\n\n\n def draw_board(self):\n for i in range(0, 6): #Determines size of terrain\n horLines = Line(Point(0, i*self.height/6),Point(self.width, i*self.height/6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n\n for j in range(0, 7):\n verLines = Line(Point(j*self.width/7, 0),Point(j*self.width/7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n\n for y in range(0,6):\n for x in range(0,7):\n slot = Circle(Point(x*self.width/7+50,y*self.height/6+50),37.5)\n slot.setFill(\"white\")\n slot.draw(self.win)\n\n def update_pieces(self,x,y,color):\n board = self.engine.get_board()\n pointY = y*self.height/6\n pointX = x*self.width/7\n piece = Circle(Point(pointX+50,pointY+50),37.5)\n if color == 'r':\n piece.setFill(\"red\")\n else:\n piece.setFill(\"black\")\n piece.draw(self.win)\n\n\n def end(self):\n self.get_window().close()\n\nclass Menu(): #CHANGE TO SELF. WIDTH AND HIEGHT\n def __init__(self,window):\n self.window = window\n\n skyBlue = color_rgb(135,206,250)\n royalBlue = color_rgb(65,105,225)\n\n self.menu = Rectangle(Point(.2*500,.15*500),Point(.8*500,.8*500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n\n self.save = Rectangle(Point(.25*500,.2*500),Point(.75*500,.35*500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n\n self.saveTxt = Text(Point(.50*500,.275*500), \"SAVE\")\n self.saveTxt.setSize(30)\n self.saveTxt.setFace(\"helvetica\")\n self.saveTxt.setStyle(\"bold\")\n\n self.load = Rectangle(Point(.25*500,.4*500),Point(.75*500,.55*500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n\n self.loadTxt = Text(Point(.50*500,.475*500), \"LOAD\")\n self.loadTxt.setSize(30)\n self.loadTxt.setFace(\"helvetica\")\n self.loadTxt.setStyle(\"bold\")\n\n self.quit = Rectangle(Point(.25*500,.6*500),Point(.75*500,.75*500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n\n self.quitTxt = Text(Point(.50*500,.675*500), \"QUIT\")\n self.quitTxt.setSize(30)\n self.quitTxt.setFace(\"helvetica\")\n self.quitTxt.setStyle(\"bold\")\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
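The flexible record above draws a 7x6 Connect Four board with Zelle's graphics.py; here is a library-free, text-mode sketch of the same board, drop, and render logic. new_board, drop, and render are illustrative names, not the record's API.

# Text-mode sketch of the 7x6 board the Renderer above draws graphically.
ROWS, COLS = 6, 7

def new_board():
    return [[' '] * COLS for _ in range(ROWS)]

def drop(board, col, color):
    """Place a piece in the lowest empty slot of col; return (x, y) or None if full."""
    for y in range(ROWS - 1, -1, -1):
        if board[y][col] == ' ':
            board[y][col] = color
            return col, y
    return None

def render(board):
    for row in board:
        print('|' + '|'.join(row) + '|')
    print(' ' + ' '.join(str(c) for c in range(COLS)))

if __name__ == '__main__':
    b = new_board()
    drop(b, 3, 'r')   # red lands on the bottom row
    drop(b, 3, 'b')   # black stacks on top of it
    render(b)
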
# line_count.py
import sys
count = 0
for line in sys.stdin:
count += 1
# print goes to sys.stdout
print count
|
normal
|
{
"blob_id": "46194829fc54c2f3e51febde572e05bcff261fb2",
"index": 7126,
"step-1": "# line_count.py\nimport sys\ncount = 0\nfor line in sys.stdin:\n\tcount += 1\n# print goes to sys.stdout\nprint count",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
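The record above is Python 2 ("print count"); here is a Python 3 sketch of the same stdin line counter (the filename in the comment is illustrative).

# line_count3.py -- Python 3 version of the record above; run as: some_command | python3 line_count3.py
import sys

count = sum(1 for _ in sys.stdin)
print(count)  # print() writes to sys.stdout
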
"""I referred below sample.
https://ja.wikipedia.org/wiki/Adapter_%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3#:~:text=Adapter%20%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3%EF%BC%88%E3%82%A2%E3%83%80%E3%83%97%E3%82%BF%E3%83%BC%E3%83%BB%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3%EF%BC%89,%E5%A4%89%E6%9B%B4%E3%81%99%E3%82%8B%E3%81%93%E3%81%A8%E3%81%8C%E3%81%A7%E3%81%8D%E3%82%8B%E3%80%82
"""
from abc import ABC, abstractmethod
class ProductPrice(ABC):
"""Target"""
@abstractmethod
def get_doll(self) -> float:
pass
class Product:
"""Adaptee"""
def __init__(self, cost: int) -> None:
self.__cost = cost
def get_yen(self) -> int:
return self.__cost
class ProductAdapter(ProductPrice):
"""Adapter"""
DOLL_RATE: int = 110
def __init__(self, product: Product) -> None:
self.__product = product
def get_doll(self) -> float:
doll = self.__product.get_yen() / self.DOLL_RATE
return doll
if __name__ == '__main__':
product = Product(cost=1000)
print(f'product cost {product.get_yen()} yen')
adapted_product = ProductAdapter(product)
print(f'product cost {adapted_product.get_doll():.1f} doll')
|
normal
|
{
"blob_id": "829e23ce2388260467ed159aa7e1480d1a3d6045",
"index": 6546,
"step-1": "<mask token>\n\n\nclass Product:\n <mask token>\n\n def __init__(self, cost: int) ->None:\n self.__cost = cost\n\n def get_yen(self) ->int:\n return self.__cost\n\n\nclass ProductAdapter(ProductPrice):\n \"\"\"Adapter\"\"\"\n DOLL_RATE: int = 110\n\n def __init__(self, product: Product) ->None:\n self.__product = product\n\n def get_doll(self) ->float:\n doll = self.__product.get_yen() / self.DOLL_RATE\n return doll\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductPrice(ABC):\n <mask token>\n <mask token>\n\n\nclass Product:\n \"\"\"Adaptee\"\"\"\n\n def __init__(self, cost: int) ->None:\n self.__cost = cost\n\n def get_yen(self) ->int:\n return self.__cost\n\n\nclass ProductAdapter(ProductPrice):\n \"\"\"Adapter\"\"\"\n DOLL_RATE: int = 110\n\n def __init__(self, product: Product) ->None:\n self.__product = product\n\n def get_doll(self) ->float:\n doll = self.__product.get_yen() / self.DOLL_RATE\n return doll\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProductPrice(ABC):\n \"\"\"Target\"\"\"\n\n @abstractmethod\n def get_doll(self) ->float:\n pass\n\n\nclass Product:\n \"\"\"Adaptee\"\"\"\n\n def __init__(self, cost: int) ->None:\n self.__cost = cost\n\n def get_yen(self) ->int:\n return self.__cost\n\n\nclass ProductAdapter(ProductPrice):\n \"\"\"Adapter\"\"\"\n DOLL_RATE: int = 110\n\n def __init__(self, product: Product) ->None:\n self.__product = product\n\n def get_doll(self) ->float:\n doll = self.__product.get_yen() / self.DOLL_RATE\n return doll\n\n\nif __name__ == '__main__':\n product = Product(cost=1000)\n print(f'product cost {product.get_yen()} yen')\n adapted_product = ProductAdapter(product)\n print(f'product cost {adapted_product.get_doll():.1f} doll')\n",
"step-4": "<mask token>\nfrom abc import ABC, abstractmethod\n\n\nclass ProductPrice(ABC):\n \"\"\"Target\"\"\"\n\n @abstractmethod\n def get_doll(self) ->float:\n pass\n\n\nclass Product:\n \"\"\"Adaptee\"\"\"\n\n def __init__(self, cost: int) ->None:\n self.__cost = cost\n\n def get_yen(self) ->int:\n return self.__cost\n\n\nclass ProductAdapter(ProductPrice):\n \"\"\"Adapter\"\"\"\n DOLL_RATE: int = 110\n\n def __init__(self, product: Product) ->None:\n self.__product = product\n\n def get_doll(self) ->float:\n doll = self.__product.get_yen() / self.DOLL_RATE\n return doll\n\n\nif __name__ == '__main__':\n product = Product(cost=1000)\n print(f'product cost {product.get_yen()} yen')\n adapted_product = ProductAdapter(product)\n print(f'product cost {adapted_product.get_doll():.1f} doll')\n",
"step-5": "\"\"\"I referred below sample.\n\nhttps://ja.wikipedia.org/wiki/Adapter_%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3#:~:text=Adapter%20%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3%EF%BC%88%E3%82%A2%E3%83%80%E3%83%97%E3%82%BF%E3%83%BC%E3%83%BB%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3%EF%BC%89,%E5%A4%89%E6%9B%B4%E3%81%99%E3%82%8B%E3%81%93%E3%81%A8%E3%81%8C%E3%81%A7%E3%81%8D%E3%82%8B%E3%80%82\n\"\"\"\n\nfrom abc import ABC, abstractmethod\n\n\nclass ProductPrice(ABC):\n \"\"\"Target\"\"\"\n\n @abstractmethod\n def get_doll(self) -> float:\n pass\n\n\nclass Product:\n \"\"\"Adaptee\"\"\"\n\n def __init__(self, cost: int) -> None:\n self.__cost = cost\n\n def get_yen(self) -> int:\n return self.__cost\n\n\nclass ProductAdapter(ProductPrice):\n \"\"\"Adapter\"\"\"\n\n DOLL_RATE: int = 110\n\n def __init__(self, product: Product) -> None:\n self.__product = product\n\n def get_doll(self) -> float:\n doll = self.__product.get_yen() / self.DOLL_RATE\n return doll\n\n\nif __name__ == '__main__':\n product = Product(cost=1000)\n print(f'product cost {product.get_yen()} yen')\n\n adapted_product = ProductAdapter(product)\n print(f'product cost {adapted_product.get_doll():.1f} doll')\n",
"step-ids": [
7,
9,
12,
13,
14
]
}
|
[
7,
9,
12,
13,
14
] |
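The record above uses an object adapter (composition); here is a class-adapter sketch of the same yen-to-doll conversion via multiple inheritance. The record's two base classes are redefined locally so the sketch runs standalone, and ProductClassAdapter is an illustrative name.

# Class-adapter variant: inherit both the Target ABC and the Adaptee.
from abc import ABC, abstractmethod

class ProductPrice(ABC):
    @abstractmethod
    def get_doll(self) -> float:
        ...

class Product:
    def __init__(self, cost: int) -> None:
        self.__cost = cost

    def get_yen(self) -> int:
        return self.__cost

class ProductClassAdapter(ProductPrice, Product):
    DOLL_RATE: int = 110

    def __init__(self, cost: int) -> None:
        Product.__init__(self, cost)

    def get_doll(self) -> float:
        return self.get_yen() / self.DOLL_RATE

if __name__ == '__main__':
    product = ProductClassAdapter(cost=1000)
    print(f'product cost {product.get_doll():.1f} doll')  # ~9.1

A class adapter couples the adapter to one concrete Adaptee, so the composition form shown in the record is usually the more flexible choice.
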
#game that has a timer and you need to stop the timer
#with 0 at the end.
import simplegui
#necessary global variables
#time for the timer
time = 0
#the display for the timer(string form)
watch = ''
#tries and correct presses
tries = 0
correct = 0
#changes time to watch(number to string of form A:BC.D)
def format():
global time, watch
t = time
deciseconds = t % 10
remains = t - deciseconds
seconds = (remains % 600) / 10
minutes = remains / 600
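    # note: simplegui implies CodeSkulptor, which runs Python 2, so / on ints above is integer division; use // under Python 3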
if seconds<10:
zero = '0'
else:
zero = ''
watch = str(minutes) + ":" + zero + str(seconds) + "." + str(deciseconds)
#increase the time
def increment():
global time
time = time + 1
#start the timer
def start():
timer.start()
#stop the timer + calculate the tries and correct stops
def stop():
global correct, tries
timer.stop()
if time != 0:
tries = tries + 1
if time % 10 == 0:
correct = correct + 1
#reset all values
def reset():
global time, correct, tries
time, correct, tries = 0,0,0
stop()
#necessary drawings
def draw(canvas):
format()
canvas.draw_text(str(correct), (253, 30), 30, 'white')
canvas.draw_text('/', (270, 30), 30, 'white')
canvas.draw_text(str(tries), (280, 30), 30, 'white')
canvas.draw_text(watch, (70, 130), 60,'white')
#frame and event handlers
frame = simplegui.create_frame("StOpWaTcH: gAmE", 320, 200)
button1 = frame.add_button("Start timer", start, 100)
button2 = frame.add_button("Stop timer", stop, 100)
button3 = frame.add_button("Resrt timer", reset, 100)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, increment)
#start of the game
frame.start()
|
normal
|
{
"blob_id": "b3c22b4a453aa55da980b090df2749ff9f1066e6",
"index": 5932,
"step-1": "<mask token>\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef format():\n global time, watch\n t = time\n deciseconds = t % 10\n remains = t - deciseconds\n seconds = remains % 600 / 10\n minutes = remains / 600\n if seconds < 10:\n zero = '0'\n else:\n zero = ''\n watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\n<mask token>\n",
"step-4": "<mask token>\ntime = 0\nwatch = ''\ntries = 0\ncorrect = 0\n\n\ndef format():\n global time, watch\n t = time\n deciseconds = t % 10\n remains = t - deciseconds\n seconds = remains % 600 / 10\n minutes = remains / 600\n if seconds < 10:\n zero = '0'\n else:\n zero = ''\n watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\nframe = simplegui.create_frame('StOpWaTcH: gAmE', 320, 200)\nbutton1 = frame.add_button('Start timer', start, 100)\nbutton2 = frame.add_button('Stop timer', stop, 100)\nbutton3 = frame.add_button('Resrt timer', reset, 100)\nframe.set_draw_handler(draw)\ntimer = simplegui.create_timer(100, increment)\nframe.start()\n",
"step-5": "#game that has a timer and you need to stop the timer\r\n#with 0 at the end.\r\n\r\nimport simplegui\r\n\r\n#necessary global variables\r\n\r\n#time for the timer\r\ntime = 0\r\n#the display for the timer(string form)\r\nwatch = ''\r\n#tries and correct presses\r\ntries = 0\r\ncorrect = 0\r\n\r\n\r\n#changes time to watch(number to string of form A:BC.D)\r\ndef format():\r\n global time, watch\r\n t = time\r\n deciseconds = t % 10\r\n remains = t - deciseconds\r\n seconds = (remains % 600) / 10\r\n minutes = remains / 600\r\n if seconds<10:\r\n zero = '0'\r\n else:\r\n zero = '' \r\n watch = str(minutes) + \":\" + zero + str(seconds) + \".\" + str(deciseconds)\r\n \r\n\r\n#increase the time \r\ndef increment():\r\n global time\r\n time = time + 1 \r\n \r\n \r\n#start the timer \r\ndef start():\r\n timer.start()\r\n \r\n\r\n#stop the timer + claculate the tries and correct stops\r\ndef stop():\r\n global correct, tries\r\n timer.stop()\r\n if time != 0:\r\n tries = tries + 1\r\n if time % 10 == 0:\r\n correct = correct + 1\r\n\r\n\r\n#reset all values \r\ndef reset():\r\n global time, correct, tries\r\n time, correct, tries = 0,0,0\r\n stop() \r\n\r\n\r\n#necessary drawings \r\ndef draw(canvas):\r\n format()\r\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\r\n canvas.draw_text('/', (270, 30), 30, 'white') \r\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\r\n canvas.draw_text(watch, (70, 130), 60,'white')\r\n \r\n\r\n#frame and event handlers\r\nframe = simplegui.create_frame(\"StOpWaTcH: gAmE\", 320, 200)\r\nbutton1 = frame.add_button(\"Start timer\", start, 100)\r\nbutton2 = frame.add_button(\"Stop timer\", stop, 100)\r\nbutton3 = frame.add_button(\"Resrt timer\", reset, 100)\r\nframe.set_draw_handler(draw)\r\ntimer = simplegui.create_timer(100, increment)\r\n\r\n\r\n#start of the game\r\nframe.start()\r\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
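# A standalone sketch, not part of the record above: the same A:BC.D
# formatting as a pure function. Note the record's format() leans on Python 2
# integer division; under Python 3 its '/' operators would yield floats, so a
# port should use divmod (as here) or '//'.
def format_tenths(t: int) -> str:
    minutes, rest = divmod(t, 600)        # 600 tenths of a second per minute
    seconds, deciseconds = divmod(rest, 10)
    return f'{minutes}:{seconds:02d}.{deciseconds}'

assert format_tenths(754) == '1:15.4'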
#Using exception handling, print an error message when a non-integer is entered. (Error message: not an integer)
try:
    x = int(input('Enter an integer: '))
    print(x)
except:
    print('Not an integer.')
|
normal
|
{
"blob_id": "906265182a9776fec5bad41bfc9ee68b36873d1e",
"index": 573,
"step-1": "<mask token>\n",
"step-2": "try:\n x = int(input('정수를 입력하세요: '))\n print(x)\nexcept:\n print('정수가 아닙니다.')\n",
"step-3": "#예외처리 문법을 활용하여 정수가 아닌 숫자를 입력했을때 에러문구가나오도록 작성.(에러문구:정수가아닙니다)\n\ntry:\n x = int(input('정수를 입력하세요: '))\n print(x)\n \nexcept:\n print('정수가 아닙니다.')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
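# A slightly stricter variant, my own sketch rather than part of the record:
# catch only ValueError instead of a bare except, and re-prompt until the
# input parses as an integer.
while True:
    try:
        x = int(input('Enter an integer: '))
        print(x)
        break
    except ValueError:
        print('Not an integer.')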
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
Like Places but possibly script based and temporary.
Like a whisper command where is keeps tracks of participants.
"""
|
flexible
|
{
"blob_id": "378c07c512425cb6ac6c998eaaa86892b02a37b8",
"index": 6905,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\nLike Places but possibly script based and temporary.\nLike a whisper command where is keeps tracks of participants.\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
from unittest import TestCase
from angelos.document.statements import Statement, Verified, Trusted, Revoked
class TestStatement(TestCase):
def setUp(self):
self.instance = Statement()
def tearDown(self):
del self.instance
def test_apply_rules(self):
self.assertTrue(self.instance.apply_rules())
class TestVerified(TestCase):
def setUp(self):
self.instance = Verified()
def tearDown(self):
del self.instance
def test_apply_rules(self):
self.assertTrue(self.instance.apply_rules())
class TestTrusted(TestCase):
def setUp(self):
self.instance = Trusted()
def tearDown(self):
del self.instance
def test_apply_rules(self):
self.assertTrue(self.instance.apply_rules())
class TestRevoked(TestCase):
def setUp(self):
self.instance = Revoked()
def tearDown(self):
del self.instance
def test_apply_rules(self):
self.assertTrue(self.instance.apply_rules())
|
normal
|
{
"blob_id": "f494dc99febfad99b371d72f542556a9024bc27d",
"index": 5333,
"step-1": "<mask token>\n\n\nclass TestVerified(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-2": "<mask token>\n\n\nclass TestStatement(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVerified(TestCase):\n\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-3": "<mask token>\n\n\nclass TestStatement(TestCase):\n <mask token>\n\n def tearDown(self):\n del self.instance\n <mask token>\n\n\nclass TestVerified(TestCase):\n\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-4": "from unittest import TestCase\nfrom angelos.document.statements import Statement, Verified, Trusted, Revoked\n\n\nclass TestStatement(TestCase):\n\n def setUp(self):\n self.instance = Statement()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestVerified(TestCase):\n\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-5": "#\n# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.\n#\n# This software is available under the terms of the MIT license. Parts are licensed under\n# different terms if stated. The legal terms are attached to the LICENSE file and are\n# made available on:\n#\n# https://opensource.org/licenses/MIT\n#\n# SPDX-License-Identifier: MIT\n#\n# Contributors:\n# Kristoffer Paulsson - initial implementation\n#\nfrom unittest import TestCase\n\nfrom angelos.document.statements import Statement, Verified, Trusted, Revoked\n\n\nclass TestStatement(TestCase):\n def setUp(self):\n self.instance = Statement()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestVerified(TestCase):\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())",
"step-ids": [
9,
13,
14,
17,
18
]
}
|
[
9,
13,
14,
17,
18
] |
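# The four test classes above differ only in the class under test; a minimal
# consolidation sketch (mine, untested against the angelos suite) covers the
# same assertions with one parametrised loop using unittest's subTest:
from unittest import TestCase

from angelos.document.statements import Statement, Verified, Trusted, Revoked


class TestApplyRules(TestCase):

    def test_apply_rules(self):
        for cls in (Statement, Verified, Trusted, Revoked):
            with self.subTest(cls=cls.__name__):
                self.assertTrue(cls().apply_rules())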
from django import forms
from django.core.validators import RegexValidator
from dashboard.validators import validate_domainonly_email
class addUserForm(forms.Form):
username = forms.CharField(label='User Name', required="required", disabled="", min_length=6, max_length=128,
help_text="",
widget=forms.TextInput(
attrs={
'style': '',
'placeholder': '',
}
))
first_name = forms.CharField(label='First Name', required="required", disabled="", min_length=3, max_length=128,
help_text="")
last_name = forms.CharField(label='Last Name', required="required", disabled="", min_length=3, max_length=128,
help_text="")
email = forms.EmailField(label='Email', required="required", disabled="", min_length=6, max_length=128,
help_text="", validators=[validate_domainonly_email])
password = forms.CharField(label='Password', required="required", disabled="", min_length=6, max_length=128,
help_text="", validators=[
RegexValidator('^(\w+\d+|\d+\w+)+$', message="Password should be a combination of Alphabets and Numbers")])
confirm_password = forms.CharField(label='Confirm Password', required="required", disabled="", min_length=6,
max_length=128,
help_text="")
def clean(self):
cleaned_data = super(addUserForm, self).clean()
username = cleaned_data.get('username')
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if not username and not first_name and not last_name and not email and not password and not confirm_password:
raise forms.ValidationError('There are errors in the fields...!')
# class editUserForm(forms.Form):
# username = forms.CharField(label='User Name', required="required", disabled="disabled", min_length="6",
# max_length=128, help_text="")
# first_name = forms.CharField(label='First Name', max_length=254, help_text="")
# last_name = forms.CharField(label='Last Name', max_length=254, help_text="")
# email = forms.EmailField(label='Email', max_length=8, help_text="")
#
# def clean(self):
# cleaned_data = super(editUserForm, self).clean()
# username = cleaned_data.get('username')
# first_name = cleaned_data.get('first_name')
# last_name = cleaned_data.get('last_name')
# email = cleaned_data.get('email')
# if not username and not first_name and not last_name and not email:
# raise forms.ValidationError('There are errors in the fields...!')
|
normal
|
{
"blob_id": "39b6ca21b8d4856e2b2edfcbd00b75fbce6dfff7",
"index": 1407,
"step-1": "<mask token>\n\n\nclass addUserForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass addUserForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if (not username and not first_name and not last_name and not email and\n not password and not confirm_password):\n raise forms.ValidationError('There are errors in the fields...!')\n",
"step-3": "<mask token>\n\n\nclass addUserForm(forms.Form):\n username = forms.CharField(label='User Name', required='required',\n disabled='', min_length=6, max_length=128, help_text='', widget=\n forms.TextInput(attrs={'style': '', 'placeholder': ''}))\n first_name = forms.CharField(label='First Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n last_name = forms.CharField(label='Last Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n email = forms.EmailField(label='Email', required='required', disabled=\n '', min_length=6, max_length=128, help_text='', validators=[\n validate_domainonly_email])\n password = forms.CharField(label='Password', required='required',\n disabled='', min_length=6, max_length=128, help_text='', validators\n =[RegexValidator('^(\\\\w+\\\\d+|\\\\d+\\\\w+)+$', message=\n 'Password should be a combination of Alphabets and Numbers')])\n confirm_password = forms.CharField(label='Confirm Password', required=\n 'required', disabled='', min_length=6, max_length=128, help_text='')\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if (not username and not first_name and not last_name and not email and\n not password and not confirm_password):\n raise forms.ValidationError('There are errors in the fields...!')\n",
"step-4": "from django import forms\nfrom django.core.validators import RegexValidator\nfrom dashboard.validators import validate_domainonly_email\n\n\nclass addUserForm(forms.Form):\n username = forms.CharField(label='User Name', required='required',\n disabled='', min_length=6, max_length=128, help_text='', widget=\n forms.TextInput(attrs={'style': '', 'placeholder': ''}))\n first_name = forms.CharField(label='First Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n last_name = forms.CharField(label='Last Name', required='required',\n disabled='', min_length=3, max_length=128, help_text='')\n email = forms.EmailField(label='Email', required='required', disabled=\n '', min_length=6, max_length=128, help_text='', validators=[\n validate_domainonly_email])\n password = forms.CharField(label='Password', required='required',\n disabled='', min_length=6, max_length=128, help_text='', validators\n =[RegexValidator('^(\\\\w+\\\\d+|\\\\d+\\\\w+)+$', message=\n 'Password should be a combination of Alphabets and Numbers')])\n confirm_password = forms.CharField(label='Confirm Password', required=\n 'required', disabled='', min_length=6, max_length=128, help_text='')\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if (not username and not first_name and not last_name and not email and\n not password and not confirm_password):\n raise forms.ValidationError('There are errors in the fields...!')\n",
"step-5": "from django import forms\nfrom django.core.validators import RegexValidator\nfrom dashboard.validators import validate_domainonly_email\n\n\nclass addUserForm(forms.Form):\n username = forms.CharField(label='User Name', required=\"required\", disabled=\"\", min_length=6, max_length=128,\n help_text=\"\",\n widget=forms.TextInput(\n attrs={\n 'style': '',\n 'placeholder': '',\n }\n ))\n first_name = forms.CharField(label='First Name', required=\"required\", disabled=\"\", min_length=3, max_length=128,\n help_text=\"\")\n last_name = forms.CharField(label='Last Name', required=\"required\", disabled=\"\", min_length=3, max_length=128,\n help_text=\"\")\n email = forms.EmailField(label='Email', required=\"required\", disabled=\"\", min_length=6, max_length=128,\n help_text=\"\", validators=[validate_domainonly_email])\n\n password = forms.CharField(label='Password', required=\"required\", disabled=\"\", min_length=6, max_length=128,\n help_text=\"\", validators=[\n RegexValidator('^(\\w+\\d+|\\d+\\w+)+$', message=\"Password should be a combination of Alphabets and Numbers\")])\n confirm_password = forms.CharField(label='Confirm Password', required=\"required\", disabled=\"\", min_length=6,\n max_length=128,\n help_text=\"\")\n\n def clean(self):\n cleaned_data = super(addUserForm, self).clean()\n username = cleaned_data.get('username')\n first_name = cleaned_data.get('first_name')\n last_name = cleaned_data.get('last_name')\n email = cleaned_data.get('email')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if not username and not first_name and not last_name and not email and not password and not confirm_password:\n raise forms.ValidationError('There are errors in the fields...!')\n\n# class editUserForm(forms.Form):\n# username = forms.CharField(label='User Name', required=\"required\", disabled=\"disabled\", min_length=\"6\",\n# max_length=128, help_text=\"\")\n# first_name = forms.CharField(label='First Name', max_length=254, help_text=\"\")\n# last_name = forms.CharField(label='Last Name', max_length=254, help_text=\"\")\n# email = forms.EmailField(label='Email', max_length=8, help_text=\"\")\n#\n# def clean(self):\n# cleaned_data = super(editUserForm, self).clean()\n# username = cleaned_data.get('username')\n# first_name = cleaned_data.get('first_name')\n# last_name = cleaned_data.get('last_name')\n# email = cleaned_data.get('email')\n# if not username and not first_name and not last_name and not email:\n# raise forms.ValidationError('There are errors in the fields...!')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
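# The clean() above raises only when every field is empty and never compares
# the two password fields; a minimal sketch of that missing check (my own
# addition — the field names come from the form, the method body is assumed):
def clean(self):
    cleaned_data = super(addUserForm, self).clean()
    password = cleaned_data.get('password')
    confirm_password = cleaned_data.get('confirm_password')
    if password and confirm_password and password != confirm_password:
        self.add_error('confirm_password', 'Passwords do not match.')
    return cleaned_data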
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
socket.bind('tcp://10.20.32.221:5555')
while True:
message = socket.recv()
print('Received request: %s' % message)
time.sleep(1)
socket.send(b'World')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://10.20.32.221:5555')
while True:
message = socket.recv()
print('Received request: %s' % message)
time.sleep(1)
socket.send(b'World')
<|reserved_special_token_1|>
import time
import zmq
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://10.20.32.221:5555')
while True:
message = socket.recv()
print('Received request: %s' % message)
time.sleep(1)
socket.send(b'World')
<|reserved_special_token_1|>
import time #import the time library
import zmq #import the ZeroMQ library
context = zmq.Context() #initialise a ZeroMQ context in the variable context
socket = context.socket(zmq.REP) #create a Reply (REP) socket from the ZeroMQ context
socket.bind("tcp://10.20.32.221:5555") #bind the socket to TCP port 5555
while True: #loop as long as the condition holds
    message = socket.recv() #store the message received on the socket in the variable message
    print("Received request: %s" % message) #print the received message
    # do some work
    time.sleep(1) #interval to rest before processing the next request
    socket.send(b"World") #send a byte message ('World') back on the socket
|
flexible
|
{
"blob_id": "ccba923fa4b07ca9c87c57797e1e6c7da3a71183",
"index": 4315,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsocket.bind('tcp://10.20.32.221:5555')\nwhile True:\n message = socket.recv()\n print('Received request: %s' % message)\n time.sleep(1)\n socket.send(b'World')\n",
"step-3": "<mask token>\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind('tcp://10.20.32.221:5555')\nwhile True:\n message = socket.recv()\n print('Received request: %s' % message)\n time.sleep(1)\n socket.send(b'World')\n",
"step-4": "import time\nimport zmq\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind('tcp://10.20.32.221:5555')\nwhile True:\n message = socket.recv()\n print('Received request: %s' % message)\n time.sleep(1)\n socket.send(b'World')\n",
"step-5": "import time #melakukan import library time\nimport zmq #melakukan import library ZeroMQ\n\ncontext = zmq.Context() #melakukan inisialisasi context ZeroMQ pada variable context \nsocket = context.socket(zmq.REP) #menginisialisasikan socket(Reply) pada variable context(ZeroMQ)\nsocket.bind(\"tcp://10.20.32.221:5555\") #melakukan binding socket dengan port tcp 5555\n\nwhile True: #Looping selama kondisi benar\n\n message = socket.recv() #menampung pesan yang diterima oleh socket ke dalam variable message\n print(\"Received request: %s\" % message) #melakukan output dari message yang diterima\n\n\t# do some work\n time.sleep(1) #waktu interval untuk istirahat/melakukan proses berikutnya\n\n\n socket.send(b\"World\") #mengirim suatu pesan berupa bit pesan ('world') ke dalam socket\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
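# The REP server above needs a REQ peer to talk to; a minimal client sketch
# (endpoint reused from the record, otherwise my own and untested):
import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect('tcp://10.20.32.221:5555')

for _ in range(3):
    socket.send(b'Hello')  # a REQ socket must send before it can recv
    reply = socket.recv()
    print('Received reply: %s' % reply)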
<|reserved_special_token_0|>
class IngredientForm(forms.ModelForm):
class Meta:
model = Ingredient
exclude = 'recipe',
labels = {'quantity': 'Qty'}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',
'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['quantity'].widget.attrs['min'] = 0.01
<|reserved_special_token_0|>
class RecipeForm(forms.ModelForm):
image = forms.ImageField(label='Image', required=False, widget=
CustomClearableFileInput)
category = forms.ModelMultipleChoiceField(queryset=Category.objects.all
(), label='Choose some categories from the list', required=False,
widget=forms.CheckboxSelectMultiple)
tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=
'Choose some tags from the list', required=False, widget=forms.
CheckboxSelectMultiple)
class Meta:
model = Recipe
exclude = ('author', 'date', 'date_posted', 'date_edited',
'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')
labels = {'intro': 'Brief Description'}
def clean_servings(self):
value = self.cleaned_data.get('servings')
if value < 1:
raise forms.ValidationError(
'The number of servings must be greater than zero'
)
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
categories = Category.objects.all().order_by('friendly_name')
friendly_name = [(c.id, c.get_friendly_name()) for c in categories]
placeholders = {'title': 'eg: Carrot Cake', 'intro':
'eg: A deliciously sweet dessert', 'prep_time':
'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':
'eg: 1hr 20mins', 'directions':
'Describe the steps to make this recipe', 'image': '',
'image_credit': 'Who took the photo?', 'servings':
'No. of servings', 'tag': '', 'category': ''}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['category'].choices = friendly_name
self.fields['title'].widget.attrs['autofocus'] = True
self.fields['directions'].required = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NewTagsForm(forms.ModelForm):
<|reserved_special_token_0|>
class Meta:
model = Tag
fields = '__all__'
<|reserved_special_token_0|>
class IngredientForm(forms.ModelForm):
class Meta:
model = Ingredient
exclude = 'recipe',
labels = {'quantity': 'Qty'}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',
'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['quantity'].widget.attrs['min'] = 0.01
<|reserved_special_token_0|>
class RecipeForm(forms.ModelForm):
image = forms.ImageField(label='Image', required=False, widget=
CustomClearableFileInput)
category = forms.ModelMultipleChoiceField(queryset=Category.objects.all
(), label='Choose some categories from the list', required=False,
widget=forms.CheckboxSelectMultiple)
tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=
'Choose some tags from the list', required=False, widget=forms.
CheckboxSelectMultiple)
class Meta:
model = Recipe
exclude = ('author', 'date', 'date_posted', 'date_edited',
'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')
labels = {'intro': 'Brief Description'}
def clean_servings(self):
value = self.cleaned_data.get('servings')
if value < 1:
raise forms.ValidationError(
'The number of servings must be greater than zero'
)
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
categories = Category.objects.all().order_by('friendly_name')
friendly_name = [(c.id, c.get_friendly_name()) for c in categories]
placeholders = {'title': 'eg: Carrot Cake', 'intro':
'eg: A deliciously sweet dessert', 'prep_time':
'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':
'eg: 1hr 20mins', 'directions':
'Describe the steps to make this recipe', 'image': '',
'image_credit': 'Who took the photo?', 'servings':
'No. of servings', 'tag': '', 'category': ''}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['category'].choices = friendly_name
self.fields['title'].widget.attrs['autofocus'] = True
self.fields['directions'].required = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NewTagsForm(forms.ModelForm):
tagname = forms.CharField(label='... or add your own tag', required=False)
class Meta:
model = Tag
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'tagname': 'One single word only'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class IngredientForm(forms.ModelForm):
class Meta:
model = Ingredient
exclude = 'recipe',
labels = {'quantity': 'Qty'}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',
'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['quantity'].widget.attrs['min'] = 0.01
<|reserved_special_token_0|>
class RecipeForm(forms.ModelForm):
image = forms.ImageField(label='Image', required=False, widget=
CustomClearableFileInput)
category = forms.ModelMultipleChoiceField(queryset=Category.objects.all
(), label='Choose some categories from the list', required=False,
widget=forms.CheckboxSelectMultiple)
tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=
'Choose some tags from the list', required=False, widget=forms.
CheckboxSelectMultiple)
class Meta:
model = Recipe
exclude = ('author', 'date', 'date_posted', 'date_edited',
'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')
labels = {'intro': 'Brief Description'}
def clean_servings(self):
value = self.cleaned_data.get('servings')
if value < 1:
raise forms.ValidationError(
'The number of servings must be greater than zero'
)
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
categories = Category.objects.all().order_by('friendly_name')
friendly_name = [(c.id, c.get_friendly_name()) for c in categories]
placeholders = {'title': 'eg: Carrot Cake', 'intro':
'eg: A deliciously sweet dessert', 'prep_time':
'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':
'eg: 1hr 20mins', 'directions':
'Describe the steps to make this recipe', 'image': '',
'image_credit': 'Who took the photo?', 'servings':
'No. of servings', 'tag': '', 'category': ''}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['category'].choices = friendly_name
self.fields['title'].widget.attrs['autofocus'] = True
self.fields['directions'].required = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NewCategoriesForm(forms.ModelForm):
friendly_name = forms.CharField(label='... or add your own category',
required=False)
class Meta:
model = Category
fields = 'friendly_name',
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'friendly_name': 'One single word only'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class NewTagsForm(forms.ModelForm):
tagname = forms.CharField(label='... or add your own tag', required=False)
class Meta:
model = Tag
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'tagname': 'One single word only'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class IngredientForm(forms.ModelForm):
class Meta:
model = Ingredient
exclude = 'recipe',
labels = {'quantity': 'Qty'}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',
'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['quantity'].widget.attrs['min'] = 0.01
<|reserved_special_token_0|>
class RecipeForm(forms.ModelForm):
image = forms.ImageField(label='Image', required=False, widget=
CustomClearableFileInput)
category = forms.ModelMultipleChoiceField(queryset=Category.objects.all
(), label='Choose some categories from the list', required=False,
widget=forms.CheckboxSelectMultiple)
tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=
'Choose some tags from the list', required=False, widget=forms.
CheckboxSelectMultiple)
class Meta:
model = Recipe
exclude = ('author', 'date', 'date_posted', 'date_edited',
'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')
labels = {'intro': 'Brief Description'}
def clean_servings(self):
value = self.cleaned_data.get('servings')
if value < 1:
raise forms.ValidationError(
'The number of servings must be greater than zero'
)
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
categories = Category.objects.all().order_by('friendly_name')
friendly_name = [(c.id, c.get_friendly_name()) for c in categories]
placeholders = {'title': 'eg: Carrot Cake', 'intro':
'eg: A deliciously sweet dessert', 'prep_time':
'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':
'eg: 1hr 20mins', 'directions':
'Describe the steps to make this recipe', 'image': '',
'image_credit': 'Who took the photo?', 'servings':
'No. of servings', 'tag': '', 'category': ''}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['category'].choices = friendly_name
self.fields['title'].widget.attrs['autofocus'] = True
self.fields['directions'].required = True
<|reserved_special_token_1|>
from django import forms
from .models import Recipe, Ingredient, Category, Tag
from blog.widgets import CustomClearableFileInput
class NewCategoriesForm(forms.ModelForm):
friendly_name = forms.CharField(label='... or add your own category',
required=False)
class Meta():
model = Category
fields = ('friendly_name',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {
'friendly_name': 'One single word only'
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class NewTagsForm(forms.ModelForm):
tagname = forms.CharField(label='... or add your own tag', required=False)
class Meta():
model = Tag
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {
'tagname': 'One single word only'
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class IngredientForm(forms.ModelForm):
class Meta:
model = Ingredient
exclude = ('recipe', )
labels = {
'quantity': 'Qty',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {
'quantity': 'eg: 0.1',
'unit': 'eg: ml',
'preparation': 'eg: chopped',
'name': 'eg: tomatoes'
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['quantity'].widget.attrs['min'] = 0.01
IngredientFormSet = forms.inlineformset_factory(Recipe, Ingredient,
form=IngredientForm,
extra=25,
min_num=1,
validate_min=True)
class RecipeForm(forms.ModelForm):
# Replace image field
image = forms.ImageField(label='Image',
required=False,
widget=CustomClearableFileInput)
# Change rendering of form to user-friendly checkboxes
# Credit:
# https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024
category = forms.ModelMultipleChoiceField(
queryset=Category.objects.all(),
label='Choose some categories from the list',
required=False,
widget=forms.CheckboxSelectMultiple
)
# Change rendering of form to user-friendly checkboxes
# Credit:
# https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024
tag = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(),
label='Choose some tags from the list',
required=False,
widget=forms.CheckboxSelectMultiple
)
class Meta:
model = Recipe
exclude = ('author', 'date',
'date_posted', 'date_edited',
'vote_count', 'votes', 'recipe_box',
'mail_sent', 'discount_code',)
labels = {
'intro': 'Brief Description',
}
def clean_servings(self):
value = self.cleaned_data.get('servings')
if value < 1:
            raise forms.ValidationError('The number of servings must be '
                                        'greater than zero')
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
categories = Category.objects.all().order_by('friendly_name')
friendly_name = [(c.id, c.get_friendly_name()) for c in categories]
placeholders = {
'title': 'eg: Carrot Cake',
'intro': 'eg: A deliciously sweet dessert',
'prep_time': 'eg: 1hr 20mins',
'cook_time': 'eg: 1hr 20mins',
'total_time': 'eg: 1hr 20mins',
'directions': 'Describe the steps to make this recipe',
'image': '',
'image_credit': 'Who took the photo?',
'servings': 'No. of servings',
'tag': '',
'category': '',
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['category'].choices = friendly_name
self.fields['title'].widget.attrs['autofocus'] = True
self.fields['directions'].required = True
|
flexible
|
{
"blob_id": "7484bd9012bc9952b679073ae036de4554d362be",
"index": 5175,
"step-1": "<mask token>\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-2": "<mask token>\n\n\nclass NewTagsForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n <mask token>\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-3": "<mask token>\n\n\nclass NewTagsForm(forms.ModelForm):\n tagname = forms.CharField(label='... or add your own tag', required=False)\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'tagname': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-4": "<mask token>\n\n\nclass NewCategoriesForm(forms.ModelForm):\n friendly_name = forms.CharField(label='... or add your own category',\n required=False)\n\n\n class Meta:\n model = Category\n fields = 'friendly_name',\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'friendly_name': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass NewTagsForm(forms.ModelForm):\n tagname = forms.CharField(label='... or add your own tag', required=False)\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'tagname': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-5": "from django import forms\nfrom .models import Recipe, Ingredient, Category, Tag\nfrom blog.widgets import CustomClearableFileInput\n\n\nclass NewCategoriesForm(forms.ModelForm):\n\n friendly_name = forms.CharField(label='... or add your own category',\n required=False)\n\n class Meta():\n model = Category\n fields = ('friendly_name',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'friendly_name': 'One single word only'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass NewTagsForm(forms.ModelForm):\n\n tagname = forms.CharField(label='... or add your own tag', required=False)\n\n class Meta():\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'tagname': 'One single word only'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n class Meta:\n model = Ingredient\n exclude = ('recipe', )\n\n labels = {\n 'quantity': 'Qty',\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'quantity': 'eg: 0.1',\n 'unit': 'eg: ml',\n 'preparation': 'eg: chopped',\n 'name': 'eg: tomatoes'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\nIngredientFormSet = forms.inlineformset_factory(Recipe, Ingredient,\n form=IngredientForm,\n extra=25,\n min_num=1,\n validate_min=True)\n\n\nclass RecipeForm(forms.ModelForm):\n\n # Replace image field\n image = forms.ImageField(label='Image',\n required=False,\n widget=CustomClearableFileInput)\n\n # Change rendering of form to user-friendly checkboxes\n # Credit:\n # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024\n category = forms.ModelMultipleChoiceField(\n queryset=Category.objects.all(),\n label='Choose some categories from the list',\n required=False,\n widget=forms.CheckboxSelectMultiple\n )\n\n # Change rendering of form to user-friendly checkboxes\n # Credit:\n # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024\n tag = forms.ModelMultipleChoiceField(\n queryset=Tag.objects.all(),\n label='Choose some tags from the list',\n required=False,\n widget=forms.CheckboxSelectMultiple\n )\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date',\n 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box',\n 'mail_sent', 'discount_code',)\n\n labels = {\n 'intro': 'Brief Description',\n }\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError('The number of servings must be \\\n greater than zero')\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n\n placeholders = {\n 'title': 'eg: Carrot Cake',\n 'intro': 'eg: A deliciously sweet dessert',\n 'prep_time': 'eg: 1hr 20mins',\n 'cook_time': 'eg: 1hr 20mins',\n 'total_time': 'eg: 1hr 20mins',\n 'directions': 'Describe the steps to make this recipe',\n 'image': '',\n 'image_credit': 'Who took the photo?',\n 'servings': 'No. 
of servings',\n 'tag': '',\n 'category': '',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-ids": [
6,
7,
9,
12,
15
]
}
|
[
6,
7,
9,
12,
15
] |
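# IngredientFormSet above is declared but never shown in use; a minimal view
# sketch (my own — the view name and template handling are assumed) wires it
# to its parent RecipeForm so both validate and save together:
def edit_recipe(request, recipe):
    if request.method == 'POST':
        form = RecipeForm(request.POST, request.FILES, instance=recipe)
        formset = IngredientFormSet(request.POST, instance=recipe)
        if form.is_valid() and formset.is_valid():
            form.save()
            formset.save()  # saves the Ingredient rows against the recipe
    else:
        form = RecipeForm(instance=recipe)
        formset = IngredientFormSet(instance=recipe)
    return form, formset  # rendering left out; template handling varies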
../../2.0.2/mpl_examples/axes_grid/simple_axesgrid2.py
|
normal
|
{
"blob_id": "73d1129418711c35046a99c1972a413357079836",
"index": 3022,
"step-1": "../../2.0.2/mpl_examples/axes_grid/simple_axesgrid2.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Created on Mar 7, 2019
@author: hzhang0418
'''
import pymp
from v6.mono import Mono
class BruteForce(Mono):
def __init__(self, features, labels, params):
super(BruteForce, self).__init__(features, labels, params)
def _count_inconsistencies(self):
if self.num_cores==1:
for ni in self.nonmatch_indices:
self.index2count[ni] = 0
for mi in self.match_indices:
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
self.index2count[ni] += 1
self.index2count[mi] = count
else:
nmatch = len(self.match_indices)
threads2incons_count = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons_count = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
if ni in local_index2incons_count:
local_index2incons_count[ni] += 1
else:
local_index2incons_count[ni] = 1
if count>0:
local_index2incons_count[mi] = count
threads2incons_count[p.thread_num] = local_index2incons_count
for _, local_index2incons_count in threads2incons_count.items():
for index, count in local_index2incons_count.items():
if index in self.index2count:
self.index2count[index] += count
else:
self.index2count[index] = count
return self.index2count
def _get_inconsistency_indices(self):
if self.num_cores==1:
for mi in self.match_indices:
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices)>0:
self.index2incons[mi] = incons_indices
for ni in incons_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
else:
nmatch = len(self.match_indices)
threads2incons = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
if len(incons_indices)>0:
local_index2incons[mi] = incons_indices
threads2incons[p.thread_num] = local_index2incons
for _, local_index2incons in threads2incons.items():
for mi, ni_indices in local_index2incons.items():
self.index2incons[mi] = ni_indices
for ni in ni_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
return self.index2incons
|
normal
|
{
"blob_id": "32c18bd578bbf91c76604f063421a65a4f7a8b63",
"index": 2204,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BruteForce(Mono):\n <mask token>\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-3": "<mask token>\n\n\nclass BruteForce(Mono):\n\n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-4": "<mask token>\nimport pymp\nfrom v6.mono import Mono\n\n\nclass BruteForce(Mono):\n\n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-5": "'''\nCreated on Mar 7, 2019\n\n@author: hzhang0418\n'''\n\nimport pymp\n\nfrom v6.mono import Mono\n\nclass BruteForce(Mono):\n \n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n \n \n def _count_inconsistencies(self):\n if self.num_cores==1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n \n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n \n else:\n nmatch = len(self.match_indices)\n \n threads2incons_count = pymp.shared.dict()\n \n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n \n count = 0\n \n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n \n if count>0:\n local_index2incons_count[mi] = count\n \n threads2incons_count[p.thread_num] = local_index2incons_count\n \n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count \n \n return self.index2count\n \n \n def _get_inconsistency_indices(self):\n \n if self.num_cores==1:\n \n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n \n if len(incons_indices)>0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n \n else:\n \n nmatch = len(self.match_indices)\n \n threads2incons = pymp.shared.dict()\n \n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n \n incons_indices = []\n \n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n \n if len(incons_indices)>0:\n local_index2incons[mi] = incons_indices\n \n threads2incons[p.thread_num] = local_index2incons\n \n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n \n return self.index2incons",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
import Bio
import os
import sys
from Bio import PDB
from Bio.PDB import PDBIO
from Bio.PDB.PDBParser import PDBParser
import math
import numpy
from collections import Counter
import random
from Bio.PDB import *
import gzip
def get_center(res_list):
coord = []
    for atom in res_list:
# print(atom.coord)
at=atom.coord
x=at[0]
y=at[1]
z=at[2]
atcord=[x,y,z]
coord.append(atcord)
x=0
y=0
z=0
i=0
for point in coord:
i=i+1
x=x+point[0]
y=y+point[1]
z=z+point[2]
x=x/i
y=y/i
z=z/i
center=numpy.array([x,y,z])
    return center
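
def get_center_np(res_list):
    # Vectorized alternative to get_center (a sketch; assumes each entry in
    # res_list is a Bio.PDB Atom whose .coord is a numpy array).
    return numpy.mean([atom.coord for atom in res_list], axis=0)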
pdbl=PDB.PDBList()
Error_out=open("microfolds_out.txt","w")
cng=0
AA=['PHE','TRP','TYR','ALA','CYS','ASP','GLU','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL']
CF=[' DA',' DC',' DG',' DT',' A',' C',' G',' U','HOH','UNK','UNX']
Metals=['FE','MN','CU','CD','OS','CO','NI','W','PT','MO','U','TA','V','AU','IR','Y','GD','RU','YB','SM','PD','AG','EU','RH','PR','RE','LU','TB','HF','HO','DY','ZR','CR','LA','CE','ER','AM','CM','TH','PU','SC','PA']
cofactor=['BCB','CLA','CHL','BCL','CL0','PMR','PHO']
#organic_cofactors_list=[]
#organic_cofactors_pdb_file=open('manual_cofactor_list_with_quinone.txt','r')
#for line in organic_cofactors_pdb_file:
# line=line.split('\t')
# organic_cofactors_list.append(line[1][:-1])
idxfile='cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'
idx=open(idxfile,"r")
idx.readline()
#idx.readline()
#idx.readline()
EC=""
i=0
for line in idx:
i=i+1
print(i)
try:
protein=line[0:4]
protein=protein.lower()
parser = PDB.PDBParser(PERMISSIVE=1)
curdir=os.getcwd()
pdbl.retrieve_pdb_file(protein,pdir=curdir+'/pdbs/')
#print (protein,'/home/hraanan/pdb_download/'+protein[1:3]+'/pdb'+protein+'.ent.gz')
#print ('unziping')
# gz = gzip.open(filename, 'rb')
# with open(final_file, 'wb') as out:
# out.writelines(gz)
# gz.close()
# #structure = parser.get_structure(protein,protein+'.pdb')
##
# #print ('unziping done')
# #os.remove(filename)
# pdbl.retrieve_pdb_file(protein)
# structure = parser.get_structure(protein,protein[1:3]+'/pdb'+protein+'.ent')
# head= structure.header['head']
# comp = structure.header['compound']
# EC==""
#
# try:
# comp=comp['1']
## except KeyError:
## try:
## EC=comp['ec_number']
## except KeyError:
## try:
## EC=comp['ec']
# except KeyError:
# EC='-.-.-.-'
# try:
# EC=comp['ec']
# except KeyError:
# pass
# try:
# EC=comp['ec_number']
# except KeyError:
# pass
# if EC=="":
# EC='-.-.-.-'
# #print(EC)
###
###
#
# sf4ID=[]
# sf4coord=[]
# for model in structure:
# if model.id==0:
# atom_list = Selection.unfold_entities(model, 'A') # A for atoms
# ns = NeighborSearch(atom_list)
# lig=[]
# for chain in model:
# for residue in chain:
# #if residue.resname not in AA and residue.resname not in CF :
# #print(chain.id,residue.resname)
# if residue.resname in organic_cofactors_list:
# #print(chain.id,residue.resname)
# atom_in_res=[]
# for atom in residue:
# atom_in_res.append(atom.element)
#
# #if any(x in Metals for x in atom_in_res)==False:
# #print ('not metal')
# # continue
#
# center = get_center(residue)
# #print ('center',center)
# lig=protein,chain.id,residue.id[1],residue.resname,center
# #print(lig)
# all_neighbors = ns.search(center, 15.0,"R") # 15.0 for distance in angstrom
# microfold_name=protein+'.'+residue.resname+'_'+ chain.id +'_'+str(residue.id[1])+'_'+head+'_'+EC
# microfold_name=microfold_name.replace(' ','')
# microfold_name=microfold_name.replace('/','_')
# microfold_dir=residue.resname
# microfold_dir=microfold_dir.replace(' ','')
# # print(microfold_name)
# if not os.path.exists('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir):
# os.makedirs('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir)
# Select = Bio.PDB.Select
# class MicroSelect(Select):
# def accept_residue(self, residue):
# if residue in all_neighbors and residue.resname!='HOH':
# return 1
# else:
# return 0
# io=PDBIO()
# io.set_structure(structure)
# #print('/home/hraanan/MicrofoldsPDBs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect())
# io.save('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect())
    except Exception:
        e = sys.exc_info()[0]
        Error_out.write('xxx\n')
        Error_out.write('\n')
        Error_out.write("<p>Error: %s</p>" % e)
        Error_out.write('xxx\n')
        print('err')
        continue
Error_out.close()
#prot.close()
print("end")
|
normal
|
{
"blob_id": "8b29c12c294a8614d8be96c312ecffa9d3bcb3f8",
"index": 4575,
"step-1": "<mask token>\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\n<mask token>\nidx.readline()\n<mask token>\nfor line in idx:\n i = i + 1\n print(i)\n try:\n protein = line[0:4]\n protein = protein.lower()\n parser = PDB.PDBParser(PERMISSIVE=1)\n curdir = os.getcwd()\n pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')\n except:\n Error_out.write('xxx\\n')\n Error_out.write('/n')\n Error_out.write('<p>Error: %s</p>')\n Error_out.write('xxx\\n')\n print('err')\n continue\nError_out.close()\nprint('end')\n",
"step-3": "<mask token>\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\npdbl = PDB.PDBList()\nError_out = open('microfolds_out.txt', 'w')\ncng = 0\nAA = ['PHE', 'TRP', 'TYR', 'ALA', 'CYS', 'ASP', 'GLU', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL']\nCF = [' DA', ' DC', ' DG', ' DT', ' A', ' C', ' G', ' U', 'HOH', 'UNK',\n 'UNX']\nMetals = ['FE', 'MN', 'CU', 'CD', 'OS', 'CO', 'NI', 'W', 'PT', 'MO', 'U',\n 'TA', 'V', 'AU', 'IR', 'Y', 'GD', 'RU', 'YB', 'SM', 'PD', 'AG', 'EU',\n 'RH', 'PR', 'RE', 'LU', 'TB', 'HF', 'HO', 'DY', 'ZR', 'CR', 'LA', 'CE',\n 'ER', 'AM', 'CM', 'TH', 'PU', 'SC', 'PA']\ncofactor = ['BCB', 'CLA', 'CHL', 'BCL', 'CL0', 'PMR', 'PHO']\nidxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'\nidx = open(idxfile, 'r')\nidx.readline()\nEC = ''\ni = 0\nfor line in idx:\n i = i + 1\n print(i)\n try:\n protein = line[0:4]\n protein = protein.lower()\n parser = PDB.PDBParser(PERMISSIVE=1)\n curdir = os.getcwd()\n pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')\n except:\n Error_out.write('xxx\\n')\n Error_out.write('/n')\n Error_out.write('<p>Error: %s</p>')\n Error_out.write('xxx\\n')\n print('err')\n continue\nError_out.close()\nprint('end')\n",
"step-4": "import Bio\nimport os\nimport sys\nfrom Bio import PDB\nfrom Bio.PDB import PDBIO\nfrom Bio.PDB.PDBParser import PDBParser\nimport math\nimport numpy\nfrom collections import Counter\nimport random\nfrom Bio.PDB import *\nimport gzip\n\n\ndef get_center(res_list):\n coord = []\n for atom in residue:\n at = atom.coord\n x = at[0]\n y = at[1]\n z = at[2]\n atcord = [x, y, z]\n coord.append(atcord)\n x = 0\n y = 0\n z = 0\n i = 0\n for point in coord:\n i = i + 1\n x = x + point[0]\n y = y + point[1]\n z = z + point[2]\n x = x / i\n y = y / i\n z = z / i\n center = numpy.array([x, y, z])\n return center\n\n\npdbl = PDB.PDBList()\nError_out = open('microfolds_out.txt', 'w')\ncng = 0\nAA = ['PHE', 'TRP', 'TYR', 'ALA', 'CYS', 'ASP', 'GLU', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL']\nCF = [' DA', ' DC', ' DG', ' DT', ' A', ' C', ' G', ' U', 'HOH', 'UNK',\n 'UNX']\nMetals = ['FE', 'MN', 'CU', 'CD', 'OS', 'CO', 'NI', 'W', 'PT', 'MO', 'U',\n 'TA', 'V', 'AU', 'IR', 'Y', 'GD', 'RU', 'YB', 'SM', 'PD', 'AG', 'EU',\n 'RH', 'PR', 'RE', 'LU', 'TB', 'HF', 'HO', 'DY', 'ZR', 'CR', 'LA', 'CE',\n 'ER', 'AM', 'CM', 'TH', 'PU', 'SC', 'PA']\ncofactor = ['BCB', 'CLA', 'CHL', 'BCL', 'CL0', 'PMR', 'PHO']\nidxfile = 'cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'\nidx = open(idxfile, 'r')\nidx.readline()\nEC = ''\ni = 0\nfor line in idx:\n i = i + 1\n print(i)\n try:\n protein = line[0:4]\n protein = protein.lower()\n parser = PDB.PDBParser(PERMISSIVE=1)\n curdir = os.getcwd()\n pdbl.retrieve_pdb_file(protein, pdir=curdir + '/pdbs/')\n except:\n Error_out.write('xxx\\n')\n Error_out.write('/n')\n Error_out.write('<p>Error: %s</p>')\n Error_out.write('xxx\\n')\n print('err')\n continue\nError_out.close()\nprint('end')\n",
"step-5": "import Bio\r\nimport os\r\nimport sys\r\nfrom Bio import PDB\r\nfrom Bio.PDB import PDBIO\r\nfrom Bio.PDB.PDBParser import PDBParser\r\nimport math\r\nimport numpy\r\nfrom collections import Counter\r\nimport random \r\nfrom Bio.PDB import *\r\nimport gzip\r\ndef get_center(res_list):\r\n coord = []\r\n \r\n for atom in residue:\r\n # print(atom.coord)\r\n at=atom.coord\r\n x=at[0]\r\n y=at[1]\r\n z=at[2]\r\n atcord=[x,y,z]\r\n coord.append(atcord)\r\n x=0\r\n y=0\r\n z=0\r\n i=0\r\n for point in coord:\r\n i=i+1\r\n x=x+point[0]\r\n y=y+point[1]\r\n z=z+point[2]\r\n x=x/i\r\n y=y/i\r\n z=z/i\r\n center=numpy.array([x,y,z]) \r\n return center;\r\n\r\n\r\n\r\npdbl=PDB.PDBList()\r\nError_out=open(\"microfolds_out.txt\",\"w\")\r\n\r\n\r\ncng=0\r\nAA=['PHE','TRP','TYR','ALA','CYS','ASP','GLU','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL']\r\nCF=[' DA',' DC',' DG',' DT',' A',' C',' G',' U','HOH','UNK','UNX']\r\nMetals=['FE','MN','CU','CD','OS','CO','NI','W','PT','MO','U','TA','V','AU','IR','Y','GD','RU','YB','SM','PD','AG','EU','RH','PR','RE','LU','TB','HF','HO','DY','ZR','CR','LA','CE','ER','AM','CM','TH','PU','SC','PA']\r\ncofactor=['BCB','CLA','CHL','BCL','CL0','PMR','PHO']\r\n\r\n#organic_cofactors_list=[]\r\n#organic_cofactors_pdb_file=open('manual_cofactor_list_with_quinone.txt','r')\r\n#for line in organic_cofactors_pdb_file:\r\n# line=line.split('\\t')\r\n# organic_cofactors_list.append(line[1][:-1])\r\n\r\n\r\n\r\n\r\nidxfile='cullpdb_pc90_res10_R1_inclNOTXRAY_inclCA_d161006_chains.txt'\r\nidx=open(idxfile,\"r\")\r\nidx.readline()\r\n#idx.readline()\r\n#idx.readline()\r\nEC=\"\"\r\ni=0\r\nfor line in idx:\r\n i=i+1\r\n print(i)\r\n try:\r\n \r\n protein=line[0:4]\r\n protein=protein.lower()\r\n parser = PDB.PDBParser(PERMISSIVE=1)\r\n curdir=os.getcwd()\r\n pdbl.retrieve_pdb_file(protein,pdir=curdir+'/pdbs/')\r\n #print (protein,'/home/hraanan/pdb_download/'+protein[1:3]+'/pdb'+protein+'.ent.gz')\r\n #print ('unziping')\r\n# gz = gzip.open(filename, 'rb') \r\n# with open(final_file, 'wb') as out: \r\n# out.writelines(gz) \r\n# gz.close()\r\n# #structure = parser.get_structure(protein,protein+'.pdb') \r\n## \r\n# #print ('unziping done')\r\n# #os.remove(filename)\r\n# pdbl.retrieve_pdb_file(protein)\r\n# structure = parser.get_structure(protein,protein[1:3]+'/pdb'+protein+'.ent')\r\n# head= structure.header['head']\r\n# comp = structure.header['compound']\r\n# EC==\"\"\r\n# \r\n# try:\r\n# comp=comp['1']\r\n## except KeyError:\r\n## try:\r\n## EC=comp['ec_number']\r\n## except KeyError:\r\n## try:\r\n## EC=comp['ec']\r\n# except KeyError:\r\n# EC='-.-.-.-'\r\n# try:\r\n# EC=comp['ec']\r\n# except KeyError:\r\n# pass\r\n# try:\r\n# EC=comp['ec_number']\r\n# except KeyError:\r\n# pass\r\n# if EC==\"\": \r\n# EC='-.-.-.-'\r\n# #print(EC)\r\n###\r\n### \r\n# \r\n# sf4ID=[]\r\n# sf4coord=[]\r\n# for model in structure:\r\n# if model.id==0:\r\n# atom_list = Selection.unfold_entities(model, 'A') # A for atoms\r\n# ns = NeighborSearch(atom_list)\r\n# lig=[]\r\n# for chain in model:\r\n# for residue in chain:\r\n# #if residue.resname not in AA and residue.resname not in CF :\r\n# #print(chain.id,residue.resname)\r\n# if residue.resname in organic_cofactors_list: \r\n# #print(chain.id,residue.resname)\r\n# atom_in_res=[]\r\n# for atom in residue:\r\n# atom_in_res.append(atom.element)\r\n# \r\n# #if any(x in Metals for x in atom_in_res)==False:\r\n# #print ('not metal')\r\n# # continue\r\n# \r\n# center = get_center(residue)\r\n# #print 
('center',center)\r\n# lig=protein,chain.id,residue.id[1],residue.resname,center\r\n# #print(lig)\r\n# all_neighbors = ns.search(center, 15.0,\"R\") # 15.0 for distance in angstrom\r\n# microfold_name=protein+'.'+residue.resname+'_'+ chain.id +'_'+str(residue.id[1])+'_'+head+'_'+EC\r\n# microfold_name=microfold_name.replace(' ','')\r\n# microfold_name=microfold_name.replace('/','_')\r\n# microfold_dir=residue.resname\r\n# microfold_dir=microfold_dir.replace(' ','')\r\n# # print(microfold_name)\r\n# if not os.path.exists('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir):\r\n# os.makedirs('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir)\r\n# Select = Bio.PDB.Select\r\n# class MicroSelect(Select):\r\n# def accept_residue(self, residue):\r\n# if residue in all_neighbors and residue.resname!='HOH':\r\n# return 1\r\n# else:\r\n# return 0\r\n# io=PDBIO()\r\n# io.set_structure(structure)\r\n# #print('/home/hraanan/MicrofoldsPDBs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect()) \r\n# io.save('/home/hraanan/MicrofoldsPDBs/organic/pdbs/'+microfold_dir+'/'+microfold_name+'.pdb', MicroSelect())\r\n except:\r\n# e = sys.exc_info()[0]\r\n Error_out.write('xxx\\n')\r\n Error_out.write('/n' )\r\n Error_out.write( \"<p>Error: %s</p>\" )\r\n Error_out.write('xxx\\n')\r\n print('err')\r\n continue\r\n \r\n \r\nError_out.close()\r\n#prot.close()\r\nprint(\"end\")\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import time
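# Record the current Unix timestamp (whole seconds since the epoch) as text.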
with open("src/time.txt", "w") as f:
f.write(str(int(time.time())))
|
normal
|
{
"blob_id": "0058a6d3c9d4e600885b876614362ea4401ce2fe",
"index": 1640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('src/time.txt', 'w') as f:\n f.write(str(int(time.time())))\n",
"step-3": "import time\nwith open('src/time.txt', 'w') as f:\n f.write(str(int(time.time())))\n",
"step-4": "import time\n\nwith open(\"src/time.txt\", \"w\") as f:\n f.write(str(int(time.time())))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
cijferICOR = float(input('What is your grade for ICOR?: '))
x = 30
beloningICOR = cijferICOR * x
beloning = 'reward €'
print(beloning, beloningICOR)
cijferPROG = float(input('What is your grade for PROG?: '))
beloningPROG = cijferPROG * x
print(beloning, beloningPROG)
cijferCSN = float(input('What is your grade for CSN?: '))
beloningCSN = cijferCSN * x
print(beloning, beloningCSN)
gemiddelde = beloningICOR + beloningPROG + beloningCSN
print('the average reward is: €', gemiddelde / 3)
totalevergoeding = beloningICOR + beloningPROG + beloningCSN
print('your total compensation is: €', totalevergoeding)
gemiddeld_cijfer = (cijferICOR + cijferPROG + cijferCSN) / 3
print('my average grade is', gemiddeld_cijfer,
      'and that earns a reward of: €', totalevergoeding)
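
# Compact equivalent (a sketch, not in the original):
# rewards = {c: float(input(f'What is your grade for {c}?: ')) * 30
#            for c in ('ICOR', 'PROG', 'CSN')}
# print('average reward: €', sum(rewards.values()) / 3)
# print('total reward: €', sum(rewards.values()))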
|
flexible
|
{
"blob_id": "74bca94cbcba0851e13d855c02fbc13fb0b09e6a",
"index": 4263,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(beloning, beloningICOR)\n<mask token>\nprint(beloning, beloningPROG)\n<mask token>\nprint(beloning, beloningCSN)\n<mask token>\nprint('de gemiddelde beloning is:€ ', gemiddelde / 3)\n<mask token>\nprint('uw totale vergoeding is:€ ', totalevergoeding)\n<mask token>\nprint('mijn cijfers gemiddeld is een', gemiddeld_cijfer,\n 'en dat levert een beloning op van: €', totalevergoeding)\n",
"step-3": "cijferICOR = float(input('Wat is je cijfer voor ICOR?: '))\nx = 30\nbeloningICOR = cijferICOR * x\nbeloning = 'beloning €'\nprint(beloning, beloningICOR)\ncijferPROG = float(input('Wat is je cijfer voor PROG: '))\nbeloningPROG = cijferPROG * x\nprint(beloning, beloningPROG)\ncijferCSN = float(input('Wat is je cijfer voor CSN?: '))\nbeloningCSN = cijferCSN * x\nprint(beloning, beloningCSN)\ngemiddelde = beloningICOR + beloningPROG + beloningCSN\nprint('de gemiddelde beloning is:€ ', gemiddelde / 3)\ntotalevergoeding = beloningICOR + beloningPROG + beloningCSN\nprint('uw totale vergoeding is:€ ', totalevergoeding)\ngemiddeld_cijfer = (cijferICOR + cijferPROG + cijferCSN) / 3\nprint('mijn cijfers gemiddeld is een', gemiddeld_cijfer,\n 'en dat levert een beloning op van: €', totalevergoeding)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import List, Dict, Optional, Tuple
import torch.jit
from torch import nn as nn
from parlai.core.dict import DictionaryAgent
from parlai.core.torch_agent import TorchAgent
from parlai.utils.bpe import Gpt2BpeHelper
class TorchScriptGreedySearch(nn.Module):
"""
A helper class for exporting simple greedy-search models via TorchScript.
Models with extra inputs will need to override to include more variables.
"""
# We currently only support these specific dictionary settings
CAIRAOKE_DICT_PARAMS = {
"dict_class": "parlai.core.dict:DictionaryAgent",
"dict_initpath": None,
"dict_language": "english",
"dict_max_ngram_size": -1,
"dict_minfreq": 0,
"dict_maxtokens": -1,
"dict_tokenizer": "gpt2",
"dict_lower": False,
"dict_textfields": "text,labels",
"dict_loaded": True,
'bpe_debug': False,
}
def __init__(self, agent: TorchAgent):
super().__init__()
self.is_bart = agent.opt['model'] == 'bart'
# Dictionary/tokenization setup
for key, val in self.CAIRAOKE_DICT_PARAMS.items():
assert (
agent.opt.get(key, val) == val
), f'The only currently supported value of "{key}" is {val}!'
orig_dict: DictionaryAgent = agent.dict
orig_bpe: Gpt2BpeHelper = orig_dict.bpe
assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())
assert not any(
i for key in orig_bpe.bpe_ranks.keys() for i in key if '\n' in i
), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!"
fused_key_bpe_ranks = {
'\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items()
}
# Cast the values as floats to be able to compare to float('inf') when doing BPE
# splitting
self.dict = ScriptableDictionaryAgent(
null_token=orig_dict.null_token,
end_token=orig_dict.end_token,
unk_token=orig_dict.unk_token,
start_token=orig_dict.start_token,
freq=orig_dict.freq,
tok2ind=orig_dict.tok2ind,
ind2tok=orig_dict.ind2tok,
bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],
bpe_encoder=orig_bpe.encoder,
bpe_byte_encoder=orig_bpe.byte_encoder,
fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=agent._get_special_tokens(),
)
# History tracking and start/end tokens
self.delimiter_tok = agent.history.delimiter_tok
self.history_size = agent.opt['history_size']
if agent.opt.get('history_add_global_end_token', None) is not None:
self.global_end_token = agent.dict[agent.dict.end_token]
else:
self.global_end_token = None
self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate']
self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None
self.start_idx = agent.model.START_IDX
self.end_idx = agent.model.END_IDX
self.null_idx = agent.model.NULL_IDX
if self.is_bart:
self.initial_decoder_input = [self.end_idx, self.start_idx]
else:
self.initial_decoder_input = [self.start_idx]
agent.model.eval()
# Create versions of the model and decoder that will flatten the incremental
# state dict, as required by TorchScript
wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)
wrapped_model = ModelIncrStateFlattener(agent.model)
# Create sample inputs for tracing
sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)
encoder_states = agent.model.encoder(sample_tokens)
initial_generations = self._get_initial_decoder_input(sample_tokens)
latent, initial_incr_state = wrapped_decoder(
initial_generations, encoder_states
)
logits = agent.model.output(latent[:, -1:, :])
_, preds = logits.max(dim=2)
incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}
# Copy the initial incremental state, used when tracing the
# .reorder_decoder_incremental_state() method below, to avoid having it be
# mutated by the following line
incr_state = wrapped_model.reorder_decoder_incremental_state(
incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device)
)
generations = torch.cat([initial_generations, preds], dim=1)
# Do tracing
self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)
self.decoder_first_pass = torch.jit.trace(
wrapped_decoder, (initial_generations, encoder_states), strict=False
)
# We do strict=False to avoid an error when passing a Dict out of
# decoder.forward()
self.partially_traced_model = torch.jit.trace_module(
wrapped_model,
{
'output': (latent[:, -1:, :]),
'reorder_decoder_incremental_state': (
initial_incr_state,
torch.tensor([0], dtype=torch.long, device=sample_tokens.device),
),
},
strict=False,
)
self.decoder_later_pass = torch.jit.trace(
wrapped_decoder, (generations, encoder_states, incr_state), strict=False
)
def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor:
"""
Workaround because we can't use TGM._get_initial_decoder_input() directly.
When we try to call that function, we get a "RuntimeError: Type 'Tuple[int,
int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and
Tuples of Tensors can be traced" error.
"""
bsz = x.size(0)
return (
torch.tensor(self.initial_decoder_input, dtype=torch.long)
.expand(bsz, len(self.initial_decoder_input))
.to(x.device)
)
def parse(self, text: str) -> List[int]:
return self.dict.txt2vec(text)
def _v2t(self, vec: List[int]) -> str:
"""
Convert token indices to string of tokens.
"""
new_vec: List[int] = []
for i in vec:
if i == self.end_idx:
break
elif i != self.start_idx:
new_vec.append(i)
return self.dict.vec2txt(new_vec)
def forward(self, context: str, max_len: int = 128) -> str:
# Vectorize all lines of context
history_vecs: List[List[int]] = []
context_lines = context.split('\n')
if self.history_size > 0:
context_lines = context_lines[-self.history_size :]
for line in context_lines:
history_vecs.append(self.parse(line))
# Get full history vec
text_vecs: List[List[int]] = []
for vec in history_vecs[:-1]:
text_vecs += [vec]
text_vecs += [self.delimiter_tok]
text_vecs += [history_vecs[-1]]
if self.global_end_token is not None:
text_vecs += [[self.global_end_token]]
# Flatten text_vecs
flattened_text_vec: List[int] = []
for vec in text_vecs:
for token in vec:
flattened_text_vec.append(token)
# Format history vec given various logic
if self.text_truncate is not None:
if self.is_bart:
truncate_length = self.text_truncate - 2 # Start and end tokens
else:
truncate_length = self.text_truncate
if len(flattened_text_vec) > truncate_length:
flattened_text_vec = flattened_text_vec[-truncate_length:]
flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)
if self.is_bart:
flattened_text_vec = torch.cat(
[
torch.tensor([self.start_idx], dtype=torch.long),
flattened_text_vec,
torch.tensor([self.end_idx], dtype=torch.long),
],
dim=0,
)
# Pass through the encoder and decoder to generate tokens
batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0) # Add batch dim
encoder_states = self.encoder(batch_text_vec)
generations = self._get_initial_decoder_input(batch_text_vec)
# keep track of early stopping if all generations finish
seen_end = torch.zeros(
batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool
)
incr_state: Dict[str, torch.Tensor] = {}
for token_idx in range(max_len):
if token_idx == 0:
latent, incr_state = self.decoder_first_pass(
generations, encoder_states
)
else:
latent, incr_state = self.decoder_later_pass(
generations, encoder_states, incr_state
)
logits = self.partially_traced_model.output(latent[:, -1:, :])
_, preds = logits.max(dim=2)
incr_state = self.partially_traced_model.reorder_decoder_incremental_state(
incr_state,
torch.tensor([0], dtype=torch.long, device=batch_text_vec.device),
)
seen_end = seen_end + (preds == self.end_idx).squeeze(1)
generations = torch.cat([generations, preds], dim=1)
if torch.all(seen_end):
break
# Get the label from the generated tokens and update the history
if self.is_bart:
assert generations[0, 0].item() == self.end_idx
generations = generations[:, 1:]
# Hack: remove initial end token. I haven't found in the code where this is
# done, but it seems to happen early on during generation
generation_tokens: List[int] = generations[0].tolist()
label = self._v2t(generation_tokens)
return label
class BaseIncrStateFlattener(nn.Module):
"""
Flatten/unflatten the incremental state for use with TorchScripting.
Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,
torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,
and previous key/value/mask, respectively. However, TorchScript expects dicts to be
of type Dict[str, torch.Tensor], and thus all input incremental states when
TorchScripting will have to be of that type. We thus unflatten the input incremental
state, already of type Dict[str, torch.Tensor], to pass it into whatever method
needs it, and we flatten it again after the updated incremental state is passed back
out.
This is a base class that provides methods for flattening/unflattening: subclasses
will call these methods as the incremental state is passed into and out of their own
methods.
"""
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
def _unflatten_incr_state(
self, flat_incr_state: Dict[str, torch.Tensor]
) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
"""
Unflatten the input incremental state.
        For instance, flat_incr_state['0__self_attn__prev_key'] will be stored in
        structured_incr_state[0]['self_attn']['prev_key'].
"""
structured_incr_state = defaultdict(lambda: defaultdict(dict))
for key, state in flat_incr_state.items():
layer_idx_str, attn_type, state_type = key.split('__')
structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state
        # Turn the nested defaultdicts back into regular dicts
        return dict({k: dict(v) for k, v in structured_incr_state.items()})
def _flatten_incr_state(
self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
        For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
        in flat_incr_state['0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
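
    # Example (sketch): layer 0's self-attention previous-key tensor flattens
    # to the key '0__self_attn__prev_key' under the scheme above.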
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(
self,
input_: torch.LongTensor,
encoder_state: Tuple[torch.Tensor, torch.Tensor],
flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(
input=input_, encoder_state=encoder_state, incr_state=structured_incr_state
)
new_flat_incr_state = self._flatten_incr_state(new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(
self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor
) -> Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = self.module.reorder_decoder_incremental_state(
incremental_state=structured_incr_state, inds=inds
)
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) -> torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) -> List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***']
if text[idx] == "'":
                # Capture contraction suffixes
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1 : idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or (
text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace()
):
# Capture runs of one type of character
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while (
last_matching_idx + 1 < len(text)
and text[last_matching_idx + 1].isalpha()
):
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while (
last_matching_idx + 1 < len(text)
and text[last_matching_idx + 1].isnumeric()
):
last_matching_idx += 1
else:
while (
last_matching_idx + 1 < len(text)
and not text[last_matching_idx + 1].isspace()
and not text[last_matching_idx + 1].isalpha()
and not text[last_matching_idx + 1].isnumeric()
):
last_matching_idx += 1
tokens.append(text[idx : last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
# Capture runs of space characters up until just before the final one
last_space_idx = idx + 1
while (
last_space_idx + 1 < len(text)
and text[last_space_idx + 1].isspace()
):
last_space_idx += 1
if last_space_idx + 1 == len(text):
# Include the last char, which is a space char
tokens.append(text[idx : last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
            # Capture runs of space characters
            last_space_idx = idx
            while (
                last_space_idx + 1 < len(text)
                and text[last_space_idx + 1].isspace()
            ):
                last_space_idx += 1
            tokens.append(text[idx : last_space_idx + 1])
            idx = last_space_idx + 1
return tokens
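
    # Example of the split behavior (a sketch): findall("I'll go  now") yields
    # ["I", "'ll", " go", " ", " now"]: contraction suffixes, character runs,
    # and space runs all become separate tokens.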
def __init__(
self,
add_prefix_space: bool,
encoder: Dict[str, str],
byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str],
):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
# special tokens
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) -> List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
# constants for readability
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
# special token detected, replace the chunk with small subchunks
# split by the special token
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
# add the special token as a delimiter
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
return output
def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:
"""
Return set of symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
set of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
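
    # Example (sketch): get_pairs(['h', 'e', 'l', 'l', 'o']) returns
    # [('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')].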
def bpe(self, word: List[str]) -> List[str]:
"""
Convert token to BPE.
:param word:
            list of tokens to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf'))
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
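
    # One merge step as a sketch: if ('l', 'l') has the lowest rank among the
    # pairs of ['h', 'e', 'l', 'l', 'o'], the word becomes ['h', 'e', 'll', 'o']
    # and the loop repeats until no remaining pair appears in bpe_ranks.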
def helper_encode(self, text: str) -> List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) -> str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) -> str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
# We iterate over "char", which is supposed to be a single
# character, because the TorchScripted version of the code
# correctly splits a string into single characters in
# self.utf8_chars() but the non-TorchScripted version doesn't
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) -> List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 0b10000000:
chars.append(s[i])
i += 1
else:
if byte < 0b11100000:
num_bytes = 2
elif byte < 0b11110000:
num_bytes = 3
elif byte < 0b11111000:
num_bytes = 4
elif byte < 0b11111100:
num_bytes = 5
elif byte < 0b11111110:
num_bytes = 6
elif byte < 0b11111111:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i : i + num_bytes])
i += num_bytes
return chars
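
    # Example (sketch): on a TorchScript (UTF-8 byte-indexed) string,
    # utf8_chars('éa') returns ['é', 'a']; in eager Python, where indexing is
    # by code point, helper_decode() compensates as noted in its comments.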
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(
self,
null_token: str,
end_token: str,
unk_token: str,
start_token: str,
freq: Dict[str, int],
tok2ind: Dict[str, int],
ind2tok: Dict[int, str],
bpe_add_prefix_space: bool,
bpe_encoder: Dict[str, str],
bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str],
):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
# cache unk token for later
self._unk_token_idx = self.tok2ind[self.unk_token]
# Initialize tokenizer
self.bpe = ScriptableGpt2BpeHelper(
add_prefix_space=bpe_add_prefix_space,
encoder=bpe_encoder,
byte_encoder=bpe_byte_encoder,
fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens,
)
def _word_lookup(self, key: str) -> int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) -> str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) -> List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
# calls the selected tokenizer function e.g. 're' => re_tokenize(text)
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) -> List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) -> List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) -> str:
"""
Convert a vector of IDs to a string.
        Looks up each ID's token via the dictionary and BPE-decodes the
        resulting token sequence back into text.
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
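

# Hypothetical end-to-end sketch (not part of the module): given a ParlAI
# TorchAgent `agent` whose options satisfy the asserts above, the wrapper can
# be scripted and run on raw text:
#   scripted = torch.jit.script(TorchScriptGreedySearch(agent))
#   print(scripted('hello world'))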
|
normal
|
{
"blob_id": "27d5ff5b0253eea36d6b492e929c4220f4b4a5eb",
"index": 1564,
"step-1": "<mask token>\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n <mask token>\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\[email protected]\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n 
Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n 
\"\"\"\n Decode list of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = 
self.gpt2_tokenize(text)\n return word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
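The core technique in the tokenizer code above is the TorchScript-friendly BPE merge loop: score every adjacent symbol pair against a rank table and fuse the lowest-ranked pair until nothing mergeable remains. A minimal standalone sketch of that merge strategy follows; the toy_ranks table and the bpe_merge name are illustrative assumptions, not ParlAI's real merge list or API.

from typing import Dict, List, Tuple

def bpe_merge(word: List[str], ranks: Dict[Tuple[str, str], int]) -> List[str]:
    """Repeatedly fuse the adjacent pair with the lowest merge rank."""
    while len(word) > 1:
        pairs = list(zip(word, word[1:]))
        # Unranked pairs score infinity, mirroring the get(..., float('inf')) lookups above.
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no mergeable pair left
        first, second = best
        merged: List[str] = []
        i = 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word

# Hypothetical toy merge table: "l"+"o" fuses before "lo"+"w".
toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
print(bpe_merge(list("low"), toy_ranks))  # ['low']

The real helper stores its ranks under newline-fused string keys only because TorchScript cannot accept tuple-keyed dict arguments; the tuple-keyed table here is the plain-Python equivalent of the same lookup.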
"step-2": "<mask token>\n\n\nclass BaseIncrStateFlattener(nn.Module):\n <mask token>\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n <mask token>\n\n def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,\n Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.\n Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.\n Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(input=\n input_, encoder_state=encoder_state, incr_state=\n structured_incr_state)\n new_flat_incr_state = self._flatten_incr_state(\n new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\[email protected]\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! 
***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: 
List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. 
Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
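Step-2 unmasks the flatten/unflatten pair that bridges the nested per-layer incremental state and the flat Dict[str, Tensor] form TorchScript insists on. Below is a dependency-free sketch of the same layer__attn__state key scheme; plain strings stand in for tensors, and both function names are placeholders rather than the wrapper's actual methods.

from collections import defaultdict
from typing import Dict

def flatten(nested: Dict[int, Dict[str, Dict[str, str]]]) -> Dict[str, str]:
    """Collapse layer -> attention-type -> state-type into single string keys."""
    flat: Dict[str, str] = {}
    for layer, by_attn in nested.items():
        for attn, by_state in by_attn.items():
            for state_type, value in by_state.items():
                flat[f"{layer:d}__{attn}__{state_type}"] = value
    return flat

def unflatten(flat: Dict[str, str]) -> Dict[int, Dict[str, Dict[str, str]]]:
    """Rebuild the nested structure by splitting keys on the '__' separator."""
    nested = defaultdict(lambda: defaultdict(dict))
    for key, value in flat.items():
        layer_str, attn, state_type = key.split("__")
        nested[int(layer_str)][attn][state_type] = value
    return {k: dict(v) for k, v in nested.items()}

state = {0: {"self_attn": {"prev_key": "<tensor>"}}}
assert unflatten(flatten(state)) == state  # round-trips losslessly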
"step-3": "<mask token>\n\n\nclass BaseIncrStateFlattener(nn.Module):\n <mask token>\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n\n def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]\n ) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n \"\"\"\n Unflatten the input incremental state.\n\n For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in\n structured_incr_state[0]['self_attn']['prev_key'].\n \"\"\"\n structured_incr_state = defaultdict(lambda : defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type\n ] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n\n def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,\n Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.\n Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.\n Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(input=\n input_, encoder_state=encoder_state, incr_state=\n structured_incr_state)\n new_flat_incr_state = self._flatten_incr_state(\n new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). 
We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\[email protected]\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles 
accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into 
text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return 
word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
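The utf8_chars method visible above classifies each UTF-8 lead byte with bare integer comparisons because TorchScript exposes no bitwise operators. The sketch below applies the same comparison trick in ordinary Python over a byte string; it stops at 4-byte sequences (the longest valid UTF-8 form), whereas the original also tolerates the obsolete 5- to 8-byte encodings it enumerates.

def utf8_char_lengths(data: bytes) -> list:
    """Split a UTF-8 byte string into per-character byte lengths using only comparisons."""
    lengths = []
    i = 0
    while i < len(data):
        byte = data[i]
        if byte < 0x80:        # 0xxxxxxx: 1-byte character
            n = 1
        elif byte < 0xE0:      # 110xxxxx: 2-byte sequence
            n = 2
        elif byte < 0xF0:      # 1110xxxx: 3-byte sequence
            n = 3
        else:                  # 11110xxx: 4-byte sequence (valid UTF-8 stops here)
            n = 4
        lengths.append(n)
        i += n
    return lengths

print(utf8_char_lengths("aé€😀".encode("utf-8")))  # [1, 2, 3, 4]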
"step-4": "<mask token>\n\n\nclass TorchScriptGreedySearch(nn.Module):\n <mask token>\n <mask token>\n\n def __init__(self, agent: TorchAgent):\n super().__init__()\n self.is_bart = agent.opt['model'] == 'bart'\n for key, val in self.CAIRAOKE_DICT_PARAMS.items():\n assert agent.opt.get(key, val\n ) == val, f'The only currently supported value of \"{key}\" is {val}!'\n orig_dict: DictionaryAgent = agent.dict\n orig_bpe: Gpt2BpeHelper = orig_dict.bpe\n assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())\n assert not any(i for key in orig_bpe.bpe_ranks.keys() for i in key if\n '\\n' in i\n ), \"We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!\"\n fused_key_bpe_ranks = {'\\n'.join(key): float(val) for key, val in\n orig_bpe.bpe_ranks.items()}\n self.dict = ScriptableDictionaryAgent(null_token=orig_dict.\n null_token, end_token=orig_dict.end_token, unk_token=orig_dict.\n unk_token, start_token=orig_dict.start_token, freq=orig_dict.\n freq, tok2ind=orig_dict.tok2ind, ind2tok=orig_dict.ind2tok,\n bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],\n bpe_encoder=orig_bpe.encoder, bpe_byte_encoder=orig_bpe.\n byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=agent._get_special_tokens())\n self.delimiter_tok = agent.history.delimiter_tok\n self.history_size = agent.opt['history_size']\n if agent.opt.get('history_add_global_end_token', None) is not None:\n self.global_end_token = agent.dict[agent.dict.end_token]\n else:\n self.global_end_token = None\n self.text_truncate = agent.opt.get('text_truncate') or agent.opt[\n 'truncate']\n self.text_truncate = (self.text_truncate if self.text_truncate >= 0\n else None)\n self.start_idx = agent.model.START_IDX\n self.end_idx = agent.model.END_IDX\n self.null_idx = agent.model.NULL_IDX\n if self.is_bart:\n self.initial_decoder_input = [self.end_idx, self.start_idx]\n else:\n self.initial_decoder_input = [self.start_idx]\n agent.model.eval()\n wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)\n wrapped_model = ModelIncrStateFlattener(agent.model)\n sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)\n encoder_states = agent.model.encoder(sample_tokens)\n initial_generations = self._get_initial_decoder_input(sample_tokens)\n latent, initial_incr_state = wrapped_decoder(initial_generations,\n encoder_states)\n logits = agent.model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}\n incr_state = wrapped_model.reorder_decoder_incremental_state(incr_state\n , torch.tensor([0], dtype=torch.long, device=sample_tokens.device))\n generations = torch.cat([initial_generations, preds], dim=1)\n self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)\n self.decoder_first_pass = torch.jit.trace(wrapped_decoder, (\n initial_generations, encoder_states), strict=False)\n self.partially_traced_model = torch.jit.trace_module(wrapped_model,\n {'output': latent[:, -1:, :],\n 'reorder_decoder_incremental_state': (initial_incr_state, torch\n .tensor([0], dtype=torch.long, device=sample_tokens.device))},\n strict=False)\n self.decoder_later_pass = torch.jit.trace(wrapped_decoder, (\n generations, encoder_states, incr_state), strict=False)\n <mask token>\n <mask token>\n <mask token>\n\n def forward(self, context: str, max_len: int=128) ->str:\n history_vecs: List[List[int]] = []\n 
context_lines = context.split('\\n')\n if self.history_size > 0:\n context_lines = context_lines[-self.history_size:]\n for line in context_lines:\n history_vecs.append(self.parse(line))\n text_vecs: List[List[int]] = []\n for vec in history_vecs[:-1]:\n text_vecs += [vec]\n text_vecs += [self.delimiter_tok]\n text_vecs += [history_vecs[-1]]\n if self.global_end_token is not None:\n text_vecs += [[self.global_end_token]]\n flattened_text_vec: List[int] = []\n for vec in text_vecs:\n for token in vec:\n flattened_text_vec.append(token)\n if self.text_truncate is not None:\n if self.is_bart:\n truncate_length = self.text_truncate - 2\n else:\n truncate_length = self.text_truncate\n if len(flattened_text_vec) > truncate_length:\n flattened_text_vec = flattened_text_vec[-truncate_length:]\n flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)\n if self.is_bart:\n flattened_text_vec = torch.cat([torch.tensor([self.start_idx],\n dtype=torch.long), flattened_text_vec, torch.tensor([self.\n end_idx], dtype=torch.long)], dim=0)\n batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0)\n encoder_states = self.encoder(batch_text_vec)\n generations = self._get_initial_decoder_input(batch_text_vec)\n seen_end = torch.zeros(batch_text_vec.size(0), device=\n batch_text_vec.device, dtype=torch.bool)\n incr_state: Dict[str, torch.Tensor] = {}\n for token_idx in range(max_len):\n if token_idx == 0:\n latent, incr_state = self.decoder_first_pass(generations,\n encoder_states)\n else:\n latent, incr_state = self.decoder_later_pass(generations,\n encoder_states, incr_state)\n logits = self.partially_traced_model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = (self.partially_traced_model.\n reorder_decoder_incremental_state(incr_state, torch.tensor(\n [0], dtype=torch.long, device=batch_text_vec.device)))\n seen_end = seen_end + (preds == self.end_idx).squeeze(1)\n generations = torch.cat([generations, preds], dim=1)\n if torch.all(seen_end):\n break\n if self.is_bart:\n assert generations[0, 0].item() == self.end_idx\n generations = generations[:, 1:]\n generation_tokens: List[int] = generations[0].tolist()\n label = self._v2t(generation_tokens)\n return label\n\n\nclass BaseIncrStateFlattener(nn.Module):\n \"\"\"\n Flatten/unflatten the incremental state for use with TorchScripting.\n\n Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,\n torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,\n and previous key/value/mask, respectively. However, TorchScript expects dicts to be\n of type Dict[str, torch.Tensor], and thus all input incremental states when\n TorchScripting will have to be of that type. 
We thus unflatten the input incremental\n state, already of type Dict[str, torch.Tensor], to pass it into whatever method\n needs it, and we flatten it again after the updated incremental state is passed back\n out.\n\n This is a base class that provides methods for flattening/unflattening: subclasses\n will call these methods as the incremental state is passed into and out of their own\n methods.\n \"\"\"\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n\n def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]\n ) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n \"\"\"\n Unflatten the input incremental state.\n\n For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in\n structured_incr_state[0]['self_attn']['prev_key'].\n \"\"\"\n structured_incr_state = defaultdict(lambda : defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type\n ] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n\n def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,\n Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.\n Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.\n Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(input=\n input_, encoder_state=encoder_state, incr_state=\n structured_incr_state)\n new_flat_incr_state = self._flatten_incr_state(\n new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). 
We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\[email protected]\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles 
accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into 
text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return 
word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
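Step-4 exposes the greedy search driver inside forward(): take the argmax token each step, append it to the running generation, and stop early once the end token has been produced. The single-sequence sketch below keeps just that loop; the next_token callable is a stand-in for the traced decoder, output head, and argmax, and the batching plus per-sequence seen_end mask of the real method are deliberately omitted.

from typing import Callable, List

def greedy_decode(next_token: Callable[[List[int]], int],
                  start_id: int, end_id: int, max_len: int = 128) -> List[int]:
    """Greedy generation: extend with the argmax token until end_id or max_len."""
    generated = [start_id]
    for _ in range(max_len):
        tok = next_token(generated)   # stand-in for decoder + output + argmax
        generated.append(tok)
        if tok == end_id:             # early stop, mirroring the seen_end check
            break
    return generated

# Hypothetical toy "model": emits token 7 twice, then the end token (9).
script = iter([7, 7, 9])
print(greedy_decode(lambda prefix: next(script), start_id=1, end_id=9))
# [1, 7, 7, 9]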
"step-5": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import defaultdict\nfrom typing import List, Dict, Optional, Tuple\n\nimport torch.jit\nfrom torch import nn as nn\n\nfrom parlai.core.dict import DictionaryAgent\nfrom parlai.core.torch_agent import TorchAgent\nfrom parlai.utils.bpe import Gpt2BpeHelper\n\n\nclass TorchScriptGreedySearch(nn.Module):\n \"\"\"\n A helper class for exporting simple greedy-search models via TorchScript.\n\n Models with extra inputs will need to override to include more variables.\n \"\"\"\n\n # We currently only support these specific dictionary settings\n CAIRAOKE_DICT_PARAMS = {\n \"dict_class\": \"parlai.core.dict:DictionaryAgent\",\n \"dict_initpath\": None,\n \"dict_language\": \"english\",\n \"dict_max_ngram_size\": -1,\n \"dict_minfreq\": 0,\n \"dict_maxtokens\": -1,\n \"dict_tokenizer\": \"gpt2\",\n \"dict_lower\": False,\n \"dict_textfields\": \"text,labels\",\n \"dict_loaded\": True,\n 'bpe_debug': False,\n }\n\n def __init__(self, agent: TorchAgent):\n super().__init__()\n\n self.is_bart = agent.opt['model'] == 'bart'\n\n # Dictionary/tokenization setup\n for key, val in self.CAIRAOKE_DICT_PARAMS.items():\n assert (\n agent.opt.get(key, val) == val\n ), f'The only currently supported value of \"{key}\" is {val}!'\n orig_dict: DictionaryAgent = agent.dict\n orig_bpe: Gpt2BpeHelper = orig_dict.bpe\n assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())\n assert not any(\n i for key in orig_bpe.bpe_ranks.keys() for i in key if '\\n' in i\n ), \"We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!\"\n fused_key_bpe_ranks = {\n '\\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items()\n }\n # Cast the values as floats to be able to compare to float('inf') when doing BPE\n # splitting\n self.dict = ScriptableDictionaryAgent(\n null_token=orig_dict.null_token,\n end_token=orig_dict.end_token,\n unk_token=orig_dict.unk_token,\n start_token=orig_dict.start_token,\n freq=orig_dict.freq,\n tok2ind=orig_dict.tok2ind,\n ind2tok=orig_dict.ind2tok,\n bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],\n bpe_encoder=orig_bpe.encoder,\n bpe_byte_encoder=orig_bpe.byte_encoder,\n fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=agent._get_special_tokens(),\n )\n\n # History tracking and start/end tokens\n self.delimiter_tok = agent.history.delimiter_tok\n self.history_size = agent.opt['history_size']\n if agent.opt.get('history_add_global_end_token', None) is not None:\n self.global_end_token = agent.dict[agent.dict.end_token]\n else:\n self.global_end_token = None\n self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate']\n self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None\n\n self.start_idx = agent.model.START_IDX\n self.end_idx = agent.model.END_IDX\n self.null_idx = agent.model.NULL_IDX\n if self.is_bart:\n self.initial_decoder_input = [self.end_idx, self.start_idx]\n else:\n self.initial_decoder_input = [self.start_idx]\n\n agent.model.eval()\n\n # Create versions of the model and decoder that will flatten the incremental\n # state dict, as required by TorchScript\n wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)\n wrapped_model = 
ModelIncrStateFlattener(agent.model)\n\n # Create sample inputs for tracing\n sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)\n encoder_states = agent.model.encoder(sample_tokens)\n initial_generations = self._get_initial_decoder_input(sample_tokens)\n latent, initial_incr_state = wrapped_decoder(\n initial_generations, encoder_states\n )\n logits = agent.model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}\n # Copy the initial incremental state, used when tracing the\n # .reorder_decoder_incremental_state() method below, to avoid having it be\n # mutated by the following line\n incr_state = wrapped_model.reorder_decoder_incremental_state(\n incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device)\n )\n generations = torch.cat([initial_generations, preds], dim=1)\n\n # Do tracing\n self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)\n self.decoder_first_pass = torch.jit.trace(\n wrapped_decoder, (initial_generations, encoder_states), strict=False\n )\n # We do strict=False to avoid an error when passing a Dict out of\n # decoder.forward()\n self.partially_traced_model = torch.jit.trace_module(\n wrapped_model,\n {\n 'output': (latent[:, -1:, :]),\n 'reorder_decoder_incremental_state': (\n initial_incr_state,\n torch.tensor([0], dtype=torch.long, device=sample_tokens.device),\n ),\n },\n strict=False,\n )\n self.decoder_later_pass = torch.jit.trace(\n wrapped_decoder, (generations, encoder_states, incr_state), strict=False\n )\n\n def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Workaround because we can't use TGM._get_initial_decoder_input() directly.\n\n When we try to call that function, we get a \"RuntimeError: Type 'Tuple[int,\n int]' cannot be traced. 
Only Tensors and (possibly nested) Lists, Dicts, and\n Tuples of Tensors can be traced\" error.\n \"\"\"\n bsz = x.size(0)\n return (\n torch.tensor(self.initial_decoder_input, dtype=torch.long)\n .expand(bsz, len(self.initial_decoder_input))\n .to(x.device)\n )\n\n def parse(self, text: str) -> List[int]:\n return self.dict.txt2vec(text)\n\n def _v2t(self, vec: List[int]) -> str:\n \"\"\"\n Convert token indices to string of tokens.\n \"\"\"\n new_vec: List[int] = []\n for i in vec:\n if i == self.end_idx:\n break\n elif i != self.start_idx:\n new_vec.append(i)\n return self.dict.vec2txt(new_vec)\n\n def forward(self, context: str, max_len: int = 128) -> str:\n\n # Vectorize all lines of context\n history_vecs: List[List[int]] = []\n context_lines = context.split('\\n')\n if self.history_size > 0:\n context_lines = context_lines[-self.history_size :]\n for line in context_lines:\n history_vecs.append(self.parse(line))\n\n # Get full history vec\n text_vecs: List[List[int]] = []\n for vec in history_vecs[:-1]:\n text_vecs += [vec]\n text_vecs += [self.delimiter_tok]\n text_vecs += [history_vecs[-1]]\n if self.global_end_token is not None:\n text_vecs += [[self.global_end_token]]\n\n # Flatten text_vecs\n flattened_text_vec: List[int] = []\n for vec in text_vecs:\n for token in vec:\n flattened_text_vec.append(token)\n\n # Format history vec given various logic\n if self.text_truncate is not None:\n if self.is_bart:\n truncate_length = self.text_truncate - 2 # Start and end tokens\n else:\n truncate_length = self.text_truncate\n if len(flattened_text_vec) > truncate_length:\n flattened_text_vec = flattened_text_vec[-truncate_length:]\n flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)\n if self.is_bart:\n flattened_text_vec = torch.cat(\n [\n torch.tensor([self.start_idx], dtype=torch.long),\n flattened_text_vec,\n torch.tensor([self.end_idx], dtype=torch.long),\n ],\n dim=0,\n )\n\n # Pass through the encoder and decoder to generate tokens\n batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0) # Add batch dim\n encoder_states = self.encoder(batch_text_vec)\n generations = self._get_initial_decoder_input(batch_text_vec)\n # keep track of early stopping if all generations finish\n seen_end = torch.zeros(\n batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool\n )\n incr_state: Dict[str, torch.Tensor] = {}\n for token_idx in range(max_len):\n if token_idx == 0:\n latent, incr_state = self.decoder_first_pass(\n generations, encoder_states\n )\n else:\n latent, incr_state = self.decoder_later_pass(\n generations, encoder_states, incr_state\n )\n logits = self.partially_traced_model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = self.partially_traced_model.reorder_decoder_incremental_state(\n incr_state,\n torch.tensor([0], dtype=torch.long, device=batch_text_vec.device),\n )\n seen_end = seen_end + (preds == self.end_idx).squeeze(1)\n generations = torch.cat([generations, preds], dim=1)\n if torch.all(seen_end):\n break\n\n # Get the label from the generated tokens and update the history\n if self.is_bart:\n assert generations[0, 0].item() == self.end_idx\n generations = generations[:, 1:]\n # Hack: remove initial end token. 
I haven't found in the code where this is\n # done, but it seems to happen early on during generation\n generation_tokens: List[int] = generations[0].tolist()\n label = self._v2t(generation_tokens)\n\n return label\n\n\nclass BaseIncrStateFlattener(nn.Module):\n \"\"\"\n Flatten/unflatten the incremental state for use with TorchScripting.\n\n Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,\n torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,\n and previous key/value/mask, respectively. However, TorchScript expects dicts to be\n of type Dict[str, torch.Tensor], and thus all input incremental states when\n TorchScripting will have to be of that type. We thus unflatten the input incremental\n state, already of type Dict[str, torch.Tensor], to pass it into whatever method\n needs it, and we flatten it again after the updated incremental state is passed back\n out.\n\n This is a base class that provides methods for flattening/unflattening: subclasses\n will call these methods as the incremental state is passed into and out of their own\n methods.\n \"\"\"\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n\n def _unflatten_incr_state(\n self, flat_incr_state: Dict[str, torch.Tensor]\n ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n \"\"\"\n Unflatten the input incremental state.\n\n For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in\n structured_incr_state[0]['self_attn']['prev_key'].\n \"\"\"\n structured_incr_state = defaultdict(lambda: defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n # Turn the nested defaultdicts back into regular dicts\n\n def _flatten_incr_state(\n self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(\n self,\n input_: torch.LongTensor,\n encoder_state: Tuple[torch.Tensor, torch.Tensor],\n flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(\n input=input_, encoder_state=encoder_state, incr_state=structured_incr_state\n )\n new_flat_incr_state = self._flatten_incr_state(new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the 
incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(\n self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor\n ) -> Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = self.module.reorder_decoder_incremental_state(\n incremental_state=structured_incr_state, inds=inds\n )\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) -> torch.Tensor:\n return self.module.output(tensor)\n\n\[email protected]\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) -> List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***']\n if text[idx] == \"'\":\n # Capture contradiction suffixes\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1 : idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or (\n text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace()\n ):\n # Capture runs of one type of character\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while (\n last_matching_idx + 1 < len(text)\n and text[last_matching_idx + 1].isalpha()\n ):\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while (\n last_matching_idx + 1 < len(text)\n and text[last_matching_idx + 1].isnumeric()\n ):\n last_matching_idx += 1\n else:\n while (\n last_matching_idx + 1 < len(text)\n and not text[last_matching_idx + 1].isspace()\n and not text[last_matching_idx + 1].isalpha()\n and not text[last_matching_idx + 1].isnumeric()\n ):\n last_matching_idx += 1\n tokens.append(text[idx : last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n # Capture runs of space characters up until just before the final one\n last_space_idx = idx + 1\n while (\n last_space_idx + 1 < len(text)\n and text[last_space_idx + 1].isspace()\n ):\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n # Include the last char, which is a space char\n tokens.append(text[idx : last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n # Capture runs of space characters\n last_space_idx = idx\n while (\n last_space_idx + 1 < len(text)\n and text[last_space_idx + 1].isspace()\n ):\n last_space_idx += 1\n tokens.append(text[idx : last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(\n self,\n add_prefix_space: bool,\n encoder: Dict[str, str],\n byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str],\n ):\n\n self.add_prefix_space = add_prefix_space\n\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for 
k, v in self.encoder.items():\n self.decoder[v] = k\n\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n\n self.bpe_ranks = fused_key_bpe_ranks\n\n # special tokens\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) -> List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n\n # constants for readability\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n # special token detected, replace the chunk with small subchunks\n # split by the special token\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n # add the special token as a delimiter\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n\n return output\n\n def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) -> List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n\n if len(pairs) == 0:\n return word\n\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf'))\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) -> List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) -> 
str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) -> str:\n \"\"\"\n Decode list of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n # We iterate over \"char\", which is supposed to be a single\n # character, because the TorchScripted version of the code\n # correctly splits a string into single characters in\n # self.utf8_chars() but the non-TorchScripted version doesn't\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) -> List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 0b10000000:\n chars.append(s[i])\n i += 1\n else:\n if byte < 0b11100000:\n num_bytes = 2\n elif byte < 0b11110000:\n num_bytes = 3\n elif byte < 0b11111000:\n num_bytes = 4\n elif byte < 0b11111100:\n num_bytes = 5\n elif byte < 0b11111110:\n num_bytes = 6\n elif byte < 0b11111111:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i : i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(\n self,\n null_token: str,\n end_token: str,\n unk_token: str,\n start_token: str,\n freq: Dict[str, int],\n tok2ind: Dict[str, int],\n ind2tok: Dict[int, str],\n bpe_add_prefix_space: bool,\n bpe_encoder: Dict[str, str],\n bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str],\n ):\n\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n\n # cache unk token for later\n self._unk_token_idx = self.tok2ind[self.unk_token]\n\n # Initialize tokenizer\n self.bpe = ScriptableGpt2BpeHelper(\n add_prefix_space=bpe_add_prefix_space,\n encoder=bpe_encoder,\n byte_encoder=bpe_byte_encoder,\n fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens,\n )\n\n def _word_lookup(self, key: str) -> int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) -> str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) -> List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n\n # calls the selected tokenizer function e.g. 're' => re_tokenize(text)\n word_tokens = self.gpt2_tokenize(text)\n\n return word_tokens\n\n def bpe_tokenize(self, text: str) -> List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) -> List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) -> str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
"step-ids": [
24,
31,
32,
36,
43
]
}
|
[
24,
31,
32,
36,
43
] |
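# A minimal standalone sketch (an addition, not part of the record above) of the key
# scheme that BaseIncrStateFlattener implements: the nested
# Dict[int, Dict[str, Dict[str, Tensor]]] incremental state is flattened to
# 'layerIdx__attnType__stateType' string keys so that TorchScript only ever sees a
# Dict[str, Tensor].
import torch
from collections import defaultdict

def flatten(structured):
    flat = {}
    for layer_idx, attn_dict in structured.items():
        for attn_type, state_dict in attn_dict.items():
            for state_type, state in state_dict.items():
                flat[f'{layer_idx:d}__{attn_type}__{state_type}'] = state
    return flat

def unflatten(flat):
    structured = defaultdict(lambda: defaultdict(dict))
    for key, state in flat.items():
        layer_idx_str, attn_type, state_type = key.split('__')
        structured[int(layer_idx_str)][attn_type][state_type] = state
    return {k: dict(v) for k, v in structured.items()}  # back to plain dicts

state = {0: {'self_attn': {'prev_key': torch.zeros(1, 2, 4)}}}
assert unflatten(flatten(state))[0]['self_attn']['prev_key'].shape == (1, 2, 4)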
<|reserved_special_token_0|>
def parse_args(args=None):
parser = argparse.ArgumentParser(
'Plots information from haplotagging_stats tsv')
parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,
required=True, type=str, help='CSV file holding data')
parser.add_argument('--figure_name', '-f', dest='figure_name', default=
'HPRC_contig_stats', required=False, type=str, help='Figure name')
return parser.parse_args() if args is None else parser.parse_args(args)
<|reserved_special_token_0|>
def get_color(filename):
if 'maternal' in filename.lower():
return 'darkred'
if 'paternal' in filename.lower():
return 'darkblue'
return 'black'
def main():
args = parse_args()
df = pd.read_csv(args.input_csv)
print(df.head())
sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'darkred',
'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',
'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Contig Count')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.contig_count.png', format='png',
dpi=200)
plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',
dpi=300)
plt.show()
plt.close()
sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',
'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={
'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',
'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Total Length')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)
plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_args(args=None):
parser = argparse.ArgumentParser(
'Plots information from haplotagging_stats tsv')
parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,
required=True, type=str, help='CSV file holding data')
parser.add_argument('--figure_name', '-f', dest='figure_name', default=
'HPRC_contig_stats', required=False, type=str, help='Figure name')
return parser.parse_args() if args is None else parser.parse_args(args)
def log(msg):
print(msg, file=sys.stderr)
def get_color(filename):
if 'maternal' in filename.lower():
return 'darkred'
if 'paternal' in filename.lower():
return 'darkblue'
return 'black'
def main():
args = parse_args()
df = pd.read_csv(args.input_csv)
print(df.head())
sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'darkred',
'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',
'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Contig Count')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.contig_count.png', format='png',
dpi=200)
plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',
dpi=300)
plt.show()
plt.close()
sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',
'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={
'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',
'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Total Length')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)
plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NUM_CONTIGS = 'num_contigs'
TOTAL_LEN = 'total_len'
HAPLOTYPE = 'haplotype'
HAPLO_SEX = 'haplotype-sex'
SEX = 'sex'
def parse_args(args=None):
parser = argparse.ArgumentParser(
'Plots information from haplotagging_stats tsv')
parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,
required=True, type=str, help='CSV file holding data')
parser.add_argument('--figure_name', '-f', dest='figure_name', default=
'HPRC_contig_stats', required=False, type=str, help='Figure name')
return parser.parse_args() if args is None else parser.parse_args(args)
def log(msg):
print(msg, file=sys.stderr)
def get_color(filename):
if 'maternal' in filename.lower():
return 'darkred'
if 'paternal' in filename.lower():
return 'darkblue'
return 'black'
def main():
args = parse_args()
df = pd.read_csv(args.input_csv)
print(df.head())
sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'darkred',
'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',
'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Contig Count')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.contig_count.png', format='png',
dpi=200)
plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',
dpi=300)
plt.show()
plt.close()
sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',
'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={
'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',
'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Total Length')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)
plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
from glob import glob
import sys
import numpy as np
import matplotlib.pyplot as plt
import pysam
import math
import pandas as pd
import haplotagging_stats
import os
import collections
import seaborn as sns
NUM_CONTIGS = 'num_contigs'
TOTAL_LEN = 'total_len'
HAPLOTYPE = 'haplotype'
HAPLO_SEX = 'haplotype-sex'
SEX = 'sex'
def parse_args(args=None):
parser = argparse.ArgumentParser(
'Plots information from haplotagging_stats tsv')
parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,
required=True, type=str, help='CSV file holding data')
parser.add_argument('--figure_name', '-f', dest='figure_name', default=
'HPRC_contig_stats', required=False, type=str, help='Figure name')
return parser.parse_args() if args is None else parser.parse_args(args)
def log(msg):
print(msg, file=sys.stderr)
def get_color(filename):
if 'maternal' in filename.lower():
return 'darkred'
if 'paternal' in filename.lower():
return 'darkblue'
return 'black'
def main():
args = parse_args()
df = pd.read_csv(args.input_csv)
print(df.head())
sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'darkred',
'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',
'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Contig Count')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.contig_count.png', format='png',
dpi=200)
plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',
dpi=300)
plt.show()
plt.close()
sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',
'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={
'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',
'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})
spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[
'Maternal-Female', 'Maternal-Male', 'Paternal-Female',
'Paternal-Male'], palette={'Maternal-Male': 'royalblue',
'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',
'Paternal-Female': 'crimson'})
plt.title('')
plt.ylabel('Total Length')
plt.xlabel('Haplotype')
plt.tight_layout()
if args.figure_name is not None:
plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)
plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import argparse
from glob import glob
import sys
import numpy as np
import matplotlib.pyplot as plt
import pysam
import math
import pandas as pd
import haplotagging_stats
import os
import collections
import seaborn as sns
NUM_CONTIGS="num_contigs"
TOTAL_LEN="total_len"
HAPLOTYPE="haplotype"
HAPLO_SEX="haplotype-sex"
SEX="sex"
# cat Y1_assemblies_v2_genbank_QC.csv | sed 's/,/\t/g' | awk '{print $1,$2,$3,"Maternal","\n",$1,$6,$7,"Paternal"}' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv
# cat Y1_assemblies_v2_genbank_QC.full.csv | sed 's/,/\t/g' | awk '{print $1,$2,$3,"Maternal","Maternal-",$23,$23,"\n",$1,$6,$7,"Paternal","Paternal-",$23,$23}' | sed 's/- /-/g' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv
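# A rough pandas equivalent of the awk/sed preprocessing above (a sketch only, not the
# pipeline actually used): per the second command, columns 2-3 carry the maternal
# num_contigs/total_len, columns 6-7 the paternal ones, and column 23 the sex. The
# 'sample' output header below is an assumption about the QC CSV.
def qc_to_contig_stats(qc_csv_path, out_csv_path):
    qc = pd.read_csv(qc_csv_path)
    rows = []
    for _, r in qc.iterrows():
        for hap, (n_col, len_col) in (('Maternal', (1, 2)), ('Paternal', (5, 6))):
            rows.append({
                'sample': r.iloc[0],
                NUM_CONTIGS: r.iloc[n_col],
                TOTAL_LEN: r.iloc[len_col],
                HAPLOTYPE: hap,
                HAPLO_SEX: hap + '-' + str(r.iloc[22]),
                SEX: r.iloc[22],
            })
    pd.DataFrame(rows).to_csv(out_csv_path, index=False)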
def parse_args(args = None):
parser = argparse.ArgumentParser("Plots information from haplotagging_stats tsv")
parser.add_argument('--input_csv', '-i', dest='input_csv', default=None, required=True, type=str,
help='CSV file holding data')
parser.add_argument('--figure_name', '-f', dest='figure_name', default="HPRC_contig_stats", required=False, type=str,
help='Figure name')
return parser.parse_args() if args is None else parser.parse_args(args)
def log(msg):
print(msg, file=sys.stderr)
def get_color(filename):
if "maternal" in filename.lower():
return "darkred"
if "paternal" in filename.lower():
return "darkblue"
return "black"
def main():
args = parse_args()
df = pd.read_csv(args.input_csv)
print(df.head())
# sns.set_palette(sns.color_palette(["darkred", "darkblue"]))
# sns.boxplot(x=HAPLOTYPE, y=NUM_CONTIGS, data=df)#, palette={"Maternal":"darkred","Paternal":"darkblue"})
# spax = sns.swarmplot(x=HAPLOTYPE, y=NUM_CONTIGS, hue=SEX, data=df, palette={"Female":"fuchsia","Male":"cyan"}) #color="fuchsia")
sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"],
palette={"Maternal-Male":"darkred","Maternal-Female":"darkred","Paternal-Male":"darkblue","Paternal-Female":"darkblue"})
spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"],
palette={"Maternal-Male":"royalblue","Maternal-Female":"crimson","Paternal-Male":"royalblue","Paternal-Female":"crimson"})
plt.title("")
plt.ylabel("Contig Count")
plt.xlabel("Haplotype")
plt.tight_layout()
# plt.set_size_inches(12, 12)
#
if args.figure_name is not None:
plt.savefig(args.figure_name+".contig_count.png", format='png', dpi=200)
plt.savefig(args.figure_name+".contig_count.pdf", format='pdf', dpi=300)
plt.show()
plt.close()
# sns.boxplot(x=HAPLOTYPE, y=TOTAL_LEN, data=df)#, palette={"Maternal":"darkred","Paternal":"darkblue"})
# spax = sns.swarmplot(x=HAPLOTYPE, y=TOTAL_LEN, hue=SEX, data=df, palette={"Female":"fuchsia","Male":"cyan"}) #color="fuchsia")
sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"],
palette={"Maternal-Male":"darkred","Maternal-Female":"darkred","Paternal-Male":"darkblue","Paternal-Female":"darkblue"})
spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=["Maternal-Female", "Maternal-Male", "Paternal-Female", "Paternal-Male"],
palette={"Maternal-Male":"royalblue","Maternal-Female":"crimson","Paternal-Male":"royalblue","Paternal-Female":"crimson"})
plt.title("")
plt.ylabel("Total Length")
plt.xlabel("Haplotype")
plt.tight_layout()
# plt.set_size_inches(12, 12)
#
if args.figure_name is not None:
plt.savefig(args.figure_name+".total_len.png", format='png', dpi=200)
plt.savefig(args.figure_name+".total_len.pdf", format='pdf', dpi=300)
plt.show()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "0c7816028e6cbd12684b0c7484835e735f1d2838",
"index": 4327,
"step-1": "<mask token>\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\n<mask token>\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nNUM_CONTIGS = 'num_contigs'\nTOTAL_LEN = 'total_len'\nHAPLOTYPE = 'haplotype'\nHAPLO_SEX = 'haplotype-sex'\nSEX = 'sex'\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nfrom glob import glob\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pysam\nimport math\nimport pandas as pd\nimport haplotagging_stats\nimport os\nimport collections\nimport seaborn as sns\nNUM_CONTIGS = 'num_contigs'\nTOTAL_LEN = 'total_len'\nHAPLOTYPE = 'haplotype'\nHAPLO_SEX = 'haplotype-sex'\nSEX = 'sex'\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n 'Plots information from haplotagging_stats tsv')\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None,\n required=True, type=str, help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\n 'HPRC_contig_stats', required=False, type=str, help='Figure name')\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if 'maternal' in filename.lower():\n return 'darkred'\n if 'paternal' in filename.lower():\n return 'darkblue'\n return 'black'\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'darkred',\n 'Maternal-Female': 'darkred', 'Paternal-Male': 'darkblue',\n 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Contig Count')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.contig_count.png', format='png',\n dpi=200)\n plt.savefig(args.figure_name + '.contig_count.pdf', format='pdf',\n dpi=300)\n plt.show()\n plt.close()\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=['Maternal-Female',\n 'Maternal-Male', 'Paternal-Female', 'Paternal-Male'], palette={\n 'Maternal-Male': 'darkred', 'Maternal-Female': 'darkred',\n 'Paternal-Male': 'darkblue', 'Paternal-Female': 'darkblue'})\n spax = sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\n 'Maternal-Female', 'Maternal-Male', 'Paternal-Female',\n 'Paternal-Male'], palette={'Maternal-Male': 'royalblue',\n 'Maternal-Female': 'crimson', 'Paternal-Male': 'royalblue',\n 'Paternal-Female': 'crimson'})\n plt.title('')\n plt.ylabel('Total Length')\n plt.xlabel('Haplotype')\n plt.tight_layout()\n if args.figure_name is not None:\n plt.savefig(args.figure_name + '.total_len.png', format='png', dpi=200)\n plt.savefig(args.figure_name + '.total_len.pdf', format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\nimport argparse\nfrom glob import glob\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pysam\nimport math\nimport pandas as pd\nimport haplotagging_stats\nimport os\nimport collections\nimport seaborn as sns\n\nNUM_CONTIGS=\"num_contigs\"\nTOTAL_LEN=\"total_len\"\nHAPLOTYPE=\"haplotype\"\nHAPLO_SEX=\"haplotype-sex\"\nSEX=\"sex\"\n\n# cat Y1_assemblies_v2_genbank_QC.csv | sed 's/,/\\t/g' | awk '{print $1,$2,$3,\"Maternal\",\"\\n\",$1,$6,$7,\"Paternal\"}' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv\n# cat Y1_assemblies_v2_genbank_QC.full.csv | sed 's/,/\\t/g' | awk '{print $1,$2,$3,\"Maternal\",\"Maternal-\",$23,$23,\"\\n\",$1,$6,$7,\"Paternal\",\"Paternal-\",$23,$23}' | sed 's/- /-/g' | sed 's/^ //' | sed 's/ $//' | sed 's/ /,/g' | sed 's/mat_//g' | sed 's/pat_//g' >Y1_assemblies_v2_genbank_QC.contig_stats.csv\ndef parse_args(args = None):\n parser = argparse.ArgumentParser(\"Plots information from haplotagging_stats tsv\")\n parser.add_argument('--input_csv', '-i', dest='input_csv', default=None, required=True, type=str,\n help='CSV file holding data')\n parser.add_argument('--figure_name', '-f', dest='figure_name', default=\"HPRC_contig_stats\", required=False, type=str,\n help='Figure name')\n\n return parser.parse_args() if args is None else parser.parse_args(args)\n\n\ndef log(msg):\n print(msg, file=sys.stderr)\n\n\ndef get_color(filename):\n if \"maternal\" in filename.lower():\n return \"darkred\"\n if \"paternal\" in filename.lower():\n return \"darkblue\"\n return \"black\"\n\n\ndef main():\n args = parse_args()\n df = pd.read_csv(args.input_csv)\n print(df.head())\n # sns.set_palette(sns.color_palette([\"darkred\", \"darkblue\"]))\n\n # sns.boxplot(x=HAPLOTYPE, y=NUM_CONTIGS, data=df)#, palette={\"Maternal\":\"darkred\",\"Paternal\":\"darkblue\"})\n # spax = sns.swarmplot(x=HAPLOTYPE, y=NUM_CONTIGS, hue=SEX, data=df, palette={\"Female\":\"fuchsia\",\"Male\":\"cyan\"}) #color=\"fuchsia\")\n\n sns.boxplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"darkred\",\"Maternal-Female\":\"darkred\",\"Paternal-Male\":\"darkblue\",\"Paternal-Female\":\"darkblue\"})\n spax = sns.swarmplot(x=HAPLO_SEX, y=NUM_CONTIGS, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"royalblue\",\"Maternal-Female\":\"crimson\",\"Paternal-Male\":\"royalblue\",\"Paternal-Female\":\"crimson\"})\n\n\n plt.title(\"\")\n plt.ylabel(\"Contig Count\")\n plt.xlabel(\"Haplotype\")\n plt.tight_layout()\n # plt.set_size_inches(12, 12)\n #\n if args.figure_name is not None:\n plt.savefig(args.figure_name+\".contig_count.png\", format='png', dpi=200)\n plt.savefig(args.figure_name+\".contig_count.pdf\", format='pdf', dpi=300)\n plt.show()\n plt.close()\n\n # sns.boxplot(x=HAPLOTYPE, y=TOTAL_LEN, data=df)#, palette={\"Maternal\":\"darkred\",\"Paternal\":\"darkblue\"})\n # spax = sns.swarmplot(x=HAPLOTYPE, y=TOTAL_LEN, hue=SEX, data=df, palette={\"Female\":\"fuchsia\",\"Male\":\"cyan\"}) #color=\"fuchsia\")\n\n sns.boxplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"darkred\",\"Maternal-Female\":\"darkred\",\"Paternal-Male\":\"darkblue\",\"Paternal-Female\":\"darkblue\"})\n spax = 
sns.swarmplot(x=HAPLO_SEX, y=TOTAL_LEN, data=df, order=[\"Maternal-Female\", \"Maternal-Male\", \"Paternal-Female\", \"Paternal-Male\"],\n palette={\"Maternal-Male\":\"royalblue\",\"Maternal-Female\":\"crimson\",\"Paternal-Male\":\"royalblue\",\"Paternal-Female\":\"crimson\"})\n\n\n\n plt.title(\"\")\n plt.ylabel(\"Total Length\")\n plt.xlabel(\"Haplotype\")\n plt.tight_layout()\n # plt.set_size_inches(12, 12)\n #\n if args.figure_name is not None:\n plt.savefig(args.figure_name+\".total_len.png\", format='png', dpi=200)\n plt.savefig(args.figure_name+\".total_len.pdf\", format='pdf', dpi=300)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import pytest
from a3 import *
from test_utils import *
from numpy import allclose
def test_problem_7_1_8():
    assert check_linalg()
    assert abs(problem_7_1_8(5000) - 84.8) < 1
|
normal
|
{
"blob_id": "c7553cadb49c9c7e80a7800b9bff4d5f64796494",
"index": 7568,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_problem_7_1_8():\n assert check_linalg()\n assert abs(problem_7_1_8(5000) - 84.8) < 1\n",
"step-3": "import pytest\nfrom a3 import *\nfrom test_utils import *\nfrom numpy import allclose\n\n\ndef test_problem_7_1_8():\n assert check_linalg()\n assert abs(problem_7_1_8(5000) - 84.8) < 1\n",
"step-4": "import pytest\nfrom a3 import *\nfrom test_utils import *\nfrom numpy import allclose\n\ndef test_problem_7_1_8():\n assert(check_linalg())\n assert(abs(problem_7_1_8(5000)-84.8)<1)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def rm(ids):
cmd = f"{runtime} rm {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def stop(ids):
cmd = f"{runtime} stop {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rm(ids):
cmd = f"{runtime} rm {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def stop(ids):
cmd = f"{runtime} stop {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def undeploy_containers():
containers = [container for _, container in get_containers().items()]
running = [c for c in containers if c.up]
if len(running) > 0:
stop([container.id for container in running])
time.sleep(5)
containers = [container for _, container in get_containers().items()]
if len(containers) > 0:
rm([container.id for container in containers])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
runtime = get_container_runtime()
def rm(ids):
cmd = f"{runtime} rm {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def stop(ids):
cmd = f"{runtime} stop {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def undeploy_containers():
containers = [container for _, container in get_containers().items()]
running = [c for c in containers if c.up]
if len(running) > 0:
stop([container.id for container in running])
time.sleep(5)
containers = [container for _, container in get_containers().items()]
if len(containers) > 0:
rm([container.id for container in containers])
if __name__ == '__main__':
undeploy_containers()
<|reserved_special_token_1|>
import sys
import time
from cli.utils import get_container_runtime, get_containers, run_shell_cmd
runtime = get_container_runtime()
def rm(ids):
cmd = f"{runtime} rm {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def stop(ids):
cmd = f"{runtime} stop {' '.join(ids)}"
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def undeploy_containers():
containers = [container for _, container in get_containers().items()]
running = [c for c in containers if c.up]
if len(running) > 0:
stop([container.id for container in running])
time.sleep(5)
containers = [container for _, container in get_containers().items()]
if len(containers) > 0:
rm([container.id for container in containers])
if __name__ == '__main__':
undeploy_containers()
<|reserved_special_token_1|>
import sys
import time
from cli.utils import get_container_runtime, get_containers, run_shell_cmd
runtime = get_container_runtime()
def rm(ids):
cmd = f'{runtime} rm {" ".join(ids)}'
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def stop(ids):
cmd = f'{runtime} stop {" ".join(ids)}'
sys.stdout.write(f'{cmd}\n')
run_shell_cmd(cmd)
def undeploy_containers():
containers = [ container for _, container in get_containers().items() ]
running = [ c for c in containers if c.up ]
if len(running) > 0:
stop([ container.id for container in running ])
time.sleep(5)
containers = [ container for _, container in get_containers().items() ]
if len(containers) > 0:
rm([ container.id for container in containers ])
if __name__ == '__main__':
undeploy_containers()
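# Minimal sketch of the container record shape the code above assumes: get_containers()
# (from cli.utils, not shown here) presumably returns a mapping of name -> record with
# .id and .up attributes. A stand-in like this lets the stop/rm selection be exercised
# without a real docker/podman runtime.
from types import SimpleNamespace

demo = {'web': SimpleNamespace(id='abc123', up=True),
        'db': SimpleNamespace(id='def456', up=False)}
to_stop = [c.id for c in demo.values() if c.up]
assert to_stop == ['abc123']  # only running containers get stopped; all get removed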
|
flexible
|
{
"blob_id": "eb3a32c17d8e5e9f717e813d5612d077c8feac48",
"index": 7145,
"step-1": "<mask token>\n\n\ndef rm(ids):\n cmd = f\"{runtime} rm {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef stop(ids):\n cmd = f\"{runtime} stop {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rm(ids):\n cmd = f\"{runtime} rm {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef stop(ids):\n cmd = f\"{runtime} stop {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef undeploy_containers():\n containers = [container for _, container in get_containers().items()]\n running = [c for c in containers if c.up]\n if len(running) > 0:\n stop([container.id for container in running])\n time.sleep(5)\n containers = [container for _, container in get_containers().items()]\n if len(containers) > 0:\n rm([container.id for container in containers])\n\n\n<mask token>\n",
"step-3": "<mask token>\nruntime = get_container_runtime()\n\n\ndef rm(ids):\n cmd = f\"{runtime} rm {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef stop(ids):\n cmd = f\"{runtime} stop {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef undeploy_containers():\n containers = [container for _, container in get_containers().items()]\n running = [c for c in containers if c.up]\n if len(running) > 0:\n stop([container.id for container in running])\n time.sleep(5)\n containers = [container for _, container in get_containers().items()]\n if len(containers) > 0:\n rm([container.id for container in containers])\n\n\nif __name__ == '__main__':\n undeploy_containers()\n",
"step-4": "import sys\nimport time\nfrom cli.utils import get_container_runtime, get_containers, run_shell_cmd\nruntime = get_container_runtime()\n\n\ndef rm(ids):\n cmd = f\"{runtime} rm {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef stop(ids):\n cmd = f\"{runtime} stop {' '.join(ids)}\"\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef undeploy_containers():\n containers = [container for _, container in get_containers().items()]\n running = [c for c in containers if c.up]\n if len(running) > 0:\n stop([container.id for container in running])\n time.sleep(5)\n containers = [container for _, container in get_containers().items()]\n if len(containers) > 0:\n rm([container.id for container in containers])\n\n\nif __name__ == '__main__':\n undeploy_containers()\n",
"step-5": "import sys\nimport time\nfrom cli.utils import get_container_runtime, get_containers, run_shell_cmd\n\nruntime = get_container_runtime()\n\ndef rm(ids):\n cmd = f'{runtime} rm {\" \".join(ids)}'\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef stop(ids):\n cmd = f'{runtime} stop {\" \".join(ids)}'\n sys.stdout.write(f'{cmd}\\n')\n run_shell_cmd(cmd)\n\n\ndef undeploy_containers():\n containers = [ container for _, container in get_containers().items() ]\n running = [ c for c in containers if c.up ]\n if len(running) > 0:\n stop([ container.id for container in running ])\n\n time.sleep(5)\n containers = [ container for _, container in get_containers().items() ]\n\n if len(containers) > 0:\n rm([ container.id for container in containers ])\n\n\nif __name__ == '__main__':\n undeploy_containers()",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
"""!
@brief Example 04
@details pyAudioAnalysis spectrogram calculation and visualization example
@author Theodoros Giannakopoulos {[email protected]}
"""
import numpy as np
import scipy.io.wavfile as wavfile
import plotly
import plotly.graph_objs as go
from pyAudioAnalysis import ShortTermFeatures as aF
layout = go.Layout(title='Spectrogram Extraction Example using pyAudioAnalysis',
xaxis=dict(title='time (sec)',),
yaxis=dict(title='Freqs (Hz)',))
def normalize_signal(signal):
signal = np.double(signal)
signal = signal / (2.0 ** 15)
signal = (signal - signal.mean())
return signal / ((np.abs(signal)).max() + 0.0000000001)
if __name__ == '__main__':
[Fs, s] = wavfile.read("../data/sample_music.wav")
s = normalize_signal(s)
[S, t, f] = aF.spectrogram(s, Fs, int(Fs * 0.020), int(Fs * 0.020))
heatmap = go.Heatmap(z=S.T, y=f, x=t)
plotly.offline.plot(go.Figure(data=[heatmap], layout=layout),
filename="temp.html", auto_open=True)
|
normal
|
{
"blob_id": "cb40141eddce9ce11fbd8475fc7c3d37438208a6",
"index": 6862,
"step-1": "<mask token>\n\n\ndef normalize_signal(signal):\n signal = np.double(signal)\n signal = signal / 2.0 ** 15\n signal = signal - signal.mean()\n return signal / (np.abs(signal).max() + 1e-10)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalize_signal(signal):\n signal = np.double(signal)\n signal = signal / 2.0 ** 15\n signal = signal - signal.mean()\n return signal / (np.abs(signal).max() + 1e-10)\n\n\nif __name__ == '__main__':\n [Fs, s] = wavfile.read('../data/sample_music.wav')\n s = normalize_signal(s)\n [S, t, f] = aF.spectrogram(s, Fs, int(Fs * 0.02), int(Fs * 0.02))\n heatmap = go.Heatmap(z=S.T, y=f, x=t)\n plotly.offline.plot(go.Figure(data=[heatmap], layout=layout), filename=\n 'temp.html', auto_open=True)\n",
"step-3": "<mask token>\nlayout = go.Layout(title=\n 'Spectrogram Extraction Example using pyAudioAnalysis', xaxis=dict(\n title='time (sec)'), yaxis=dict(title='Freqs (Hz)'))\n\n\ndef normalize_signal(signal):\n signal = np.double(signal)\n signal = signal / 2.0 ** 15\n signal = signal - signal.mean()\n return signal / (np.abs(signal).max() + 1e-10)\n\n\nif __name__ == '__main__':\n [Fs, s] = wavfile.read('../data/sample_music.wav')\n s = normalize_signal(s)\n [S, t, f] = aF.spectrogram(s, Fs, int(Fs * 0.02), int(Fs * 0.02))\n heatmap = go.Heatmap(z=S.T, y=f, x=t)\n plotly.offline.plot(go.Figure(data=[heatmap], layout=layout), filename=\n 'temp.html', auto_open=True)\n",
"step-4": "<mask token>\nimport numpy as np\nimport scipy.io.wavfile as wavfile\nimport plotly\nimport plotly.graph_objs as go\nfrom pyAudioAnalysis import ShortTermFeatures as aF\nlayout = go.Layout(title=\n 'Spectrogram Extraction Example using pyAudioAnalysis', xaxis=dict(\n title='time (sec)'), yaxis=dict(title='Freqs (Hz)'))\n\n\ndef normalize_signal(signal):\n signal = np.double(signal)\n signal = signal / 2.0 ** 15\n signal = signal - signal.mean()\n return signal / (np.abs(signal).max() + 1e-10)\n\n\nif __name__ == '__main__':\n [Fs, s] = wavfile.read('../data/sample_music.wav')\n s = normalize_signal(s)\n [S, t, f] = aF.spectrogram(s, Fs, int(Fs * 0.02), int(Fs * 0.02))\n heatmap = go.Heatmap(z=S.T, y=f, x=t)\n plotly.offline.plot(go.Figure(data=[heatmap], layout=layout), filename=\n 'temp.html', auto_open=True)\n",
"step-5": "\"\"\"! \n@brief Example 04\n@details pyAudioAnalysis spectrogram calculation and visualization example\n@author Theodoros Giannakopoulos {[email protected]}\n\"\"\"\nimport numpy as np\nimport scipy.io.wavfile as wavfile\nimport plotly\nimport plotly.graph_objs as go\nfrom pyAudioAnalysis import ShortTermFeatures as aF\nlayout = go.Layout(title='Spectrogram Extraction Example using pyAudioAnalysis',\n xaxis=dict(title='time (sec)',),\n yaxis=dict(title='Freqs (Hz)',))\n\ndef normalize_signal(signal):\n signal = np.double(signal)\n signal = signal / (2.0 ** 15)\n signal = (signal - signal.mean())\n return signal / ((np.abs(signal)).max() + 0.0000000001)\n\nif __name__ == '__main__':\n [Fs, s] = wavfile.read(\"../data/sample_music.wav\")\n s = normalize_signal(s)\n [S, t, f] = aF.spectrogram(s, Fs, int(Fs * 0.020), int(Fs * 0.020))\n heatmap = go.Heatmap(z=S.T, y=f, x=t)\n plotly.offline.plot(go.Figure(data=[heatmap], layout=layout),\n filename=\"temp.html\", auto_open=True)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import ctypes
import time
from order_queue.order import Order
class stock(ctypes.Structure):
    _fields_ = [
        ('stock_id', ctypes.c_int),
        ('order_type', ctypes.c_int),
        ('Time', ctypes.c_char * 40),
        ('user_id', ctypes.c_int),
        ('volume', ctypes.c_int),
        ('price', ctypes.c_double)
    ]
class exchange(ctypes.Structure):
_fields_ = [
('stock_id',ctypes.c_int),
('buy_id',ctypes.c_int),
('sell_id',ctypes.c_int),
('Time',ctypes.c_char * 40),
('volume',ctypes.c_int),
('price',ctypes.c_double)
]
class TestSturcture(ctypes.Structure):
_fields_ = [
('a',ctypes.c_int),
('n',ctypes.c_int)
]
def time_conversion(input):
get = time.strftime("%H:%M:%S", input).encode('utf-8')
return get
def order_conversion(order):
get_time = time_conversion(order.time)
get = stock(int(order.get_stock_id()),int(order.get_direction()),get_time,int(order.get_user_id()[1:]),int(order.get_volume()),float(order.get_price()))
return get
def regenerate_order(result,long_order,short_order):
deal_volume = result.volume
if int(long_order.get_volume()) != result.volume:
left_volume = int(long_order.get_volume()) - result.volume
left_order = long_order
elif int(short_order.get_volume()) != result.volume:
        left_volume = int(short_order.get_volume()) - result.volume
left_order = short_order
else:
return None
order = Order( left_order.get_stock_id(),left_order.get_user_id(),left_order.get_price(),left_volume,left_order.get_direction())
return order
if __name__ == '__main__':
print(time_conversion(time.localtime(time.time())))
|
normal
|
{
"blob_id": "7491a17256b9bc7af0953202e45f0fd9d5c34c40",
"index": 8376,
"step-1": "<mask token>\n\n\nclass exchange(ctypes.Structure):\n <mask token>\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass stock(ctypes.Structure):\n <mask token>\n\n\nclass exchange(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('buy_id', ctypes.c_int), (\n 'sell_id', ctypes.c_int), ('Time', ctypes.c_char * 40), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass stock(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('order_type', ctypes.c_int), (\n 'Time', ctypes.c_char * 40), ('user_id', ctypes.c_int), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass exchange(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('buy_id', ctypes.c_int), (\n 'sell_id', ctypes.c_int), ('Time', ctypes.c_char * 40), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass stock(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('order_type', ctypes.c_int), (\n 'Time', ctypes.c_char * 40), ('user_id', ctypes.c_int), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass exchange(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('buy_id', ctypes.c_int), (\n 'sell_id', ctypes.c_int), ('Time', ctypes.c_char * 40), ('volume',\n ctypes.c_int), ('price', ctypes.c_double)]\n\n\nclass TestSturcture(ctypes.Structure):\n _fields_ = [('a', ctypes.c_int), ('n', ctypes.c_int)]\n\n\ndef time_conversion(input):\n get = time.strftime('%H:%M:%S', input).encode('utf-8')\n return get\n\n\n<mask token>\n\n\ndef regenerate_order(result, long_order, short_order):\n deal_volume = result.volume\n if int(long_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = long_order\n elif int(short_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = short_order\n else:\n return None\n order = Order(left_order.get_stock_id(), left_order.get_user_id(),\n left_order.get_price(), left_volume, left_order.get_direction())\n return order\n\n\n<mask token>\n",
"step-5": "import ctypes\nimport time\nfrom order_queue.order import Order\n\nclass stock(ctypes.Structure):\n _fields_ = [('stock_id', ctypes.c_int), ('order_type',ctypes.c_int),('Time',ctypes.c_char * 40),('user_id',ctypes.c_int),('volume',ctypes.c_int),\n ('price',ctypes.c_double)\n ]\nclass exchange(ctypes.Structure):\n _fields_ = [\n ('stock_id',ctypes.c_int),\n ('buy_id',ctypes.c_int),\n ('sell_id',ctypes.c_int),\n ('Time',ctypes.c_char * 40),\n ('volume',ctypes.c_int),\n ('price',ctypes.c_double)\n ]\nclass TestSturcture(ctypes.Structure):\n _fields_ = [\n ('a',ctypes.c_int),\n ('n',ctypes.c_int)\n ]\ndef time_conversion(input):\n get = time.strftime(\"%H:%M:%S\", input).encode('utf-8')\n return get\n\ndef order_conversion(order):\n get_time = time_conversion(order.time)\n get = stock(int(order.get_stock_id()),int(order.get_direction()),get_time,int(order.get_user_id()[1:]),int(order.get_volume()),float(order.get_price()))\n return get\n\ndef regenerate_order(result,long_order,short_order):\n deal_volume = result.volume\n if int(long_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = long_order\n elif int(short_order.get_volume()) != result.volume:\n left_volume = int(long_order.get_volume()) - result.volume\n left_order = short_order\n else:\n return None\n\n order = Order( left_order.get_stock_id(),left_order.get_user_id(),left_order.get_price(),left_volume,left_order.get_direction())\n return order\n\nif __name__ == '__main__':\n print(time_conversion(time.localtime(time.time())))",
"step-ids": [
3,
5,
6,
8,
12
]
}
|
[
3,
5,
6,
8,
12
] |
/opt/python3.7/lib/python3.7/_weakrefset.py
|
normal
|
{
"blob_id": "22f7f725d89db354b2e66ff145550192826af5ea",
"index": 9109,
"step-1": "/opt/python3.7/lib/python3.7/_weakrefset.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Created on Feb 10, 2013
@author: jens
Deprecated module for crystallography-related geometry operations, and a lot
of other assorted utilities that accumulated here.
"""
import numpy as np
atomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8,
'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15,
'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22,
'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,
'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36}
covalence_radius = {'H': .37, 'He': .0, 'Li': 1.23, 'Be': .90, 'B': .80, 'C': .77,
'N': .74, 'O': .71, 'F': .72, 'Ne': 0., 'Na': 1.54, 'Mg': 1.36,
'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': .99, 'Ar': 0.,
'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22,
'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15,
'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20,
'Se': 1.16, 'Br': 1.14, 'Kr': 0.,
'Rb': 2.18} # , 191, 162, 145, 134, 130, 127, 125, 125, 128, 134, 148, 144, 141, 140, 136, 133, 0, 235, 198, 169, 165, 165, 164, 164, 162, 185, 161, 159, 159, 157, 157, 156, 170, 156, 144, 134, 130, 128, 126, 127, 130, 134, 149, 148, 147, 146, 146, 145, 0, 0, 0, 188, 165, 161, 142, 130, 151, 182, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
electro_negativ = {'H': 2.20, 'He': 5.50, 'Li': .97, 'Be': 1.47, 'B': 2.01, 'C': 2.50,
'N': 3.07, 'O': 3.50, 'F': 4.40, 'Ne': 4.80, 'Na': 1.01, 'Mg': 1.23,
'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': 3.20,
'K': .91, 'Ca': 1.04, 'Sc': 1.20, 'Ti': 1.32, 'V': 1.45,
'Cr': 1.56, 'Mn': 1.60, 'Fe': 1.64, 'Co': 1.70, 'Ni': 1.75,
'Cu': 1.75, 'Zn': 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.20,
'Se': 2.48, 'Br': 2.74, 'Kr': 2.90,
'Rb': .89} # , 99, 111, 122, 123, 130, 136, 142, 145, 130, 142, 146, 149, 172, 182, 201, 221, 240, 86, 97, 108, 108, 107, 107, 107, 107, 110, 111, 110, 110, 110, 111, 111, 106, 114, 123, 133, 140, 146, 152, 155, 142, 142, 144, 144, 155, 167 }
proton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B': '005', 'C': '006', 'N': '007', 'O': '008',
'F': '009', 'Ne': '010', 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015',
'S': '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021', 'Ti': '022',
'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co': '027', 'Ni': '028', 'Cu': '029',
'Zn': '030', 'Ga': '031', 'Ge': '032', 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'}
number_proton = dict([[v, k] for k, v in proton_number.items()])
priority = {'3': '5',
'2': '4',
'1.5': '3',
'6': '2',
'5': '1',
'1': '0'}
def frac2cart(coords, matrix):
coords = np.dot(matrix, coords).flatten().tolist()[0]
return coords
def xd_element(name):
"""
Return the element of an atom as defined in it's label.
"""
try:
name = name[:2]
except:
pass
try:
covalence_radius[name]
except:
name = name[0]
return name
def Uiso(adp, mean='geometric'):
try:
adp = get_adp_as_matrix(adp)
eigvals = np.linalg.eigvals(adp)
if mean == 'geometric':
return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (1. / 3.)
elif mean == 'arithmetic':
return sum(eigvals) / 3.
else:
print('crystgeom: Error: please specify mean as \'geometric\' or \'arithmetic\'')
exit()
except:
return adp
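# Example sketch: for an isotropic ADP [0.02, 0.02, 0.02, 0.0, 0.0, 0.0]
# all three eigenvalues are 0.02, so Uiso() returns 0.02 for both the
# geometric and the arithmetic mean.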
def get_adp_as_matrix(adp):
if adp is None:
return None
return np.matrix([[adp[0], adp[3], adp[4]],
[adp[3], adp[1], adp[5]],
[adp[4], adp[5], adp[2]]])
def get_compound_properties(path):
"""
Reads a *.FChk file and returns a list containing the charge of
the compound, the number of electrons in the compound, the overall
    length of the dipole moment vector and the total HF energy.
"""
filepointer = open(path)
charge = None
NE = None
E_HF = None
dipole = None
read_dipole = False
for line in filepointer:
if read_dipole:
read_dipole = False
dipole = [float(value) for value in line.split(' ') if '.' in value]
dipole = np.linalg.norm(dipole)
elif 'Charge' in line and not charge:
charge = line.split(' ')[-1].rstrip('\n')
elif 'Number of electrons' in line and not NE:
NE = line.split(' ')[-1].rstrip('\n')
elif 'Total Energy' in line and not E_HF:
E_HF = line.split(' ')[-1].rstrip('\n')
elif 'Dipole Moment' in line and not dipole:
read_dipole = True
if charge and NE and E_HF and dipole:
break
return [charge, NE, dipole, E_HF]
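# Usage sketch (hypothetical file name): get_compound_properties('mol.FChk')
# returns [charge, electron_count, |dipole|, E_HF]; charge, electron count
# and energy come back as strings exactly as read from the file, while the
# dipole is already a float (the norm of the dipole vector).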
def center_molecule(atom_coords):
center = get_geom_center(atom_coords)
atom_coords = move_center_to_point(atom_coords, center)
return atom_coords
def get_pair_list(atom_elements_1, atom_coords_1,
atom_elements_2, atom_coords_2):
pair_list = []
    for i in range(len(atom_coords_1)):
        best_hit = (9, None)
        for j in range(len(atom_coords_2)):
dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])
if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:
best_hit = (dist, j)
pair_list.append(best_hit[1])
# ===========================================================================
# print
# for i in xrange(len(pair_list)):
# print atom_atoms_1[i],atom_atoms_2[pair_list[i]]
#===========================================================================
return pair_list
def bond_order(bondxi,
               threshold_single_meso=0.0847,
               threshold_meso_double=0.0847,  # an earlier revision used 0.184
               threshold_double_triple=0.27):
"""
Returns the bond order between two atoms.
"""
if bondxi < threshold_single_meso:
order = '1'
elif bondxi < threshold_meso_double:
order = '1.5'
elif bondxi < threshold_double_triple:
order = '2'
else:
order = '3'
return order
# ===============================================================================
# def rotate_3D_symmetric(atom,source_atom):
# '''
# Rotates the ADP of 'atom' to match the orientation
# of 'source_atom.
# '''
# cosangle=np.dot(atom.orientation[0],source_atom.orientation[0])
# angle=np.arccos(cosangle)
# axis=np.cross(atom.orientation[0],source_atom.orientation[0])
# axis=axis/np.linalg.norm(axis)
# matrix=get_3drotation_matrix(axis,angle)
# orientation0_new=np.dot(source_atom.orientation[0],matrix)
# if np.linalg.norm(orientation0_new-atom.orientation[0])<0.00001:
# pass
# else:
# angle=angle*-1
# matrix=get_3drotation_matrix(axis,angle)
#
# atom.adp['cart_int']=rotate_adp(source_atom.adp['cart_int'],matrix)
#===============================================================================
def rotate_3D(atom, source_atom):
"""
Rotates the ADP of 'atom' to match the orientation
    of 'source_atom'.
"""
from lauescript.cryst.match import get_transform
lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]
lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]
matrix = get_transform(lst1, lst2, matrix=True)
adp = source_atom.adp['cart_int']
atom.adp['cart_int'] = rotate_adp(adp, matrix)
def xi(element1, element2, distance):
"""
Calculates the bond distinguishing parameter Xi.
"""
return (float(covalence_radius[element1]) + float(covalence_radius[element2]) -
(0.08 * float(abs(electro_negativ[element1] - electro_negativ[element2]))) - distance)
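# Example sketch: for a 1.39 Angstrom C-C bond,
# xi('C', 'C', 1.39) = 0.77 + 0.77 - 0.08 * |2.50 - 2.50| - 1.39 = 0.15,
# which bond_order() above classifies as a double bond ('2') under the
# default thresholds (0.0847 < 0.15 < 0.27).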
def get_orientation_vector(atom1, atom2):
v = atom1.cart - atom2.cart
return v / np.linalg.norm(v)
def framework_crawler(atom, direction, rigid_group_old=None):
"""
    Function to identify atoms belonging to a previously defined rigid
group.
Arguments:
atom: the name of the first atom of the rigid group.
direction: the name of the second atom of the rigid group.
rigid_group_old: used by the function itself for consecutive calls.
Returns a list of atom names belonging to the rigid group.
"""
if not rigid_group_old:
rigid_group = [atom, direction]
else:
rigid_group = rigid_group_old
for atom in get_framework_neighbours(direction):
if not atom in rigid_group and not atom.element == 'H':
rigid_group.append(atom)
framework_crawler(rigid_group[0], atom, rigid_group)
if not rigid_group_old:
#=======================================================================
# print ' Determined rigid group:', [i.name for i in rigid_group]
#=======================================================================
return rigid_group
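# Usage sketch (hypothetical atoms): framework_crawler(C1, C2) started on a
# bond of a phenyl ring walks every non-hydrogen atom reachable from that
# bond and returns them as one rigid group.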
def get_closest_atom_of_element(element, atom, exclude=None):
"""
Returns the atom with the shortest distance to the given atom.
"""
for atom2 in atom.partner:
if (element == atom2.element or not element) and not atom2 == exclude:
return atom2
def get_atom_with_longest_bond(element, atom):
hit = None
for atom2 in atom.partner:
if element in atom2.name:
if np.linalg.norm(atom.cart - atom2.cart) < 1.8:
hit = atom2
else:
break
return hit
def get_framework_neighbours(atom, useH=True):
"""
    Needs an ATOM.atom instance as argument.
Returns the names of the framework atoms bound to that atom.
"""
neighbourlist = []
for atom2 in atom.partner[:5]:
#if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:
if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(
covalence_radius[atom2.element]) + .1:
if not 'H' == atom2.element or useH:
neighbourlist.append(atom2)
return neighbourlist
#===============================================================================
# def get_framework_neighbours(atom,useH=True):
# """
# Needs a classes.atom instance as argument.
# Returns the names of the framework atoms bound to that atom.
# """
# neighbourlist=[]
# for atom2 in atom.partner[atom.molecule.name][1:5]:
# #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:
# if np.linalg.norm(atom.cart-atom2.cart)<=1.6:
# if not 'H(' in atom2.name or useH:
# neighbourlist.append(atom2)
# return neighbourlist
#===============================================================================
def read_meas_adp(data, path='xd.res', use='meas'):
"""
Reads the measured ADP from the xd.res file.
The parameters are stored in atom.adp['frac_meas'] and
atom.adp['cart_meas']
"""
use2 = 'frac_' + use
switch = False
filepointer = open(path, 'r')
atomname = None
for line in filepointer:
if switch:
split = [i for i in line.split(' ') if len(i) > 0]
if not len(split) == 6:
                print('WARNING!!! Inconsistent number of floats while reading measured ADP.')
data['exp'][atomname].adp[use2] = split
switch = False
if '(' in line:
split = [i for i in line.split(' ') if len(i) > 0]
if split[0][-1] == ')':
switch = True
atomname = split[0]
use = 'cart_' + use
for atom in data['exp'].atoms:
# if use == 'cart_neut': print(atom)
atom.adp[use] = rotate_adp2(atom.adp[use2],
atom.molecule.frac2cartmatrix,
atom.molecule.cell)
return data
def reflect_adp(adp, planev):
"""
Returns the ADP after reflection on the plane defined by its normal
vector 'planev'.
"""
M = np.identity(4)
M[:3, :3] -= 2.0 * np.outer(planev, planev)
M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev
return rotate_adp(adp, M[:3, :3])
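# Example sketch: reflecting across the xy-plane with planev = [0, 0, 1]
# gives a mirror matrix diag(1, 1, -1), so U11, U22, U33 and U12 are kept
# while U13 and U23 change sign.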
def eigenv2tensor(axis):
"""
    Calculates the tensor representation of an ADP from its principal axes.
"""
vec = np.ones((3, 3))
vecval = np.ones((3, 3))
    for i in range(len(axis)):
vmag = np.linalg.norm(axis[i])
v = axis[i] / vmag
#print v
vec[:, i] = v
vecval[:, i] = axis[i]
adp = np.linalg.solve(vec, vecval)
return adp
def get_adp_from_calc(vx, vy, vz):
"""
Calculates an ADP in its matrix representation from the three
    principal axes representing the displacement ellipsoid.
    The three principal axes of the ellipsoid are needed as arguments.
A Matrix representation of the ADP is returned.
"""
## lx=np.linalg.norm(vx)
## ly=np.linalg.norm(vy)
## lz=np.linalg.norm(vz)
lx = vx
ly = vy
lz = vz
L = np.matrix([[lx, 0, 0],
[0, ly, 0],
[0, 0, lz]])
## Vx=vx/lx
## Vy=vy/ly
## Vz=vz/lz
Vx = np.array([1, 0, 0])
Vy = np.array([0, 1, 0])
Vz = np.array([0, 0, 1])
V = np.matrix([[Vx[0], Vy[0], Vz[0]],
[Vx[1], Vy[1], Vz[1]],
[Vx[2], Vy[2], Vz[2]]])
Vinv = np.linalg.inv(V)
#print V,Vinv
M = np.dot(np.dot(Vinv, L), V)
#print M
return M
#===============================================================================
#
#
# def get_general_distances(coordlist1,coordlist2,atomlist1,atomlist2):
# """
# Calculates a distance dictionary between two sets of atoms.
# Returns a dictionary entry for every atom in atomlist1 with the inter atom
# distances and the corresponding atom name keyed to their atom type.
#
# This function is used by the get_best_point() function.
# """
# maindict={}
# for i in xrange(len(atomlist1)):
# distdict={}
# for j in xrange(len(atomlist2)):
# if not atomlist2[j][0] in distdict.keys():
# distdict[atomlist2[j][0]]=[[np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]]]
# else:
# distdict[atomlist2[j][0]].append([np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]])
# ## print atomlist1[i],'aaaaaaaaaaa'
# maindict[atomlist1[i]]=distdict
# return maindict
#===============================================================================
def get_best_quaternion(coordlist1, coordlist2):
"""
    Determines the quaternion representing the best possible
    transformation of two coordinate systems into each other using
    a least squares approach.
This function is used by the get_refined_rotation() function.
"""
M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
if len(coordlist1) <= len(coordlist2):
number = len(coordlist1)
else:
number = len(coordlist2)
    for i in range(number):
aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))
M = M + aaa
N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])
N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])
N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])
N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])
N12 = float(M[1][:, 2] - M[2][:, 1])
N13 = float(M[2][:, 0] - M[0][:, 2])
N14 = float(M[0][:, 1] - M[1][:, 0])
N21 = float(N12)
N23 = float(M[0][:, 1] + M[1][:, 0])
N24 = float(M[2][:, 0] + M[0][:, 2])
N31 = float(N13)
N32 = float(N23)
N34 = float(M[1][:, 2] + M[2][:, 1])
N41 = float(N14)
N42 = float(N24)
N43 = float(N34)
N = np.matrix([[N11, N12, N13, N14],
[N21, N22, N23, N24],
[N31, N32, N33, N34],
[N41, N42, N43, N44]])
values, vectors = np.linalg.eig(N)
w = list(values)
quat = vectors[:, w.index(max(w))]
quat = np.array(quat).reshape(-1, ).tolist()
return quat, max(w)
def get_rotation_matrix_from_quaternion(q):
"""
Returns the rotation matrix equivalent of the given quaternion.
This function is used by the get_refined_rotation() function.
"""
R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],
2 * (q[1] * q[2] - q[0] * q[3]),
2 * (q[1] * q[3] + q[0] * q[2])],
[2 * (q[2] * q[1] + q[0] * q[3]),
q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],
2 * (q[2] * q[3] - q[0] * q[1])],
[2 * (q[3] * q[1] - q[0] * q[2]),
2 * (q[3] * q[2] + q[0] * q[1]),
q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])
return R
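# Usage sketch combining the two helpers above for centered coordinate
# lists:
#   quat, weight = get_best_quaternion(coordlist1, coordlist2)
#   R = get_rotation_matrix_from_quaternion(quat)
# R then superimposes one list onto the other in a least-squares sense;
# the identity quaternion [1, 0, 0, 0] yields the 3x3 identity matrix.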
def get_geom_center(coordlist):
"""
Calculates the geometrical center of a set of points.
"""
return sum(coordlist) / len(coordlist)
def move_center_to_point(atomlist, point):
"""
Moves the geometrical center of the atoms in atomlist to the given point.
"""
for atom in range(len(atomlist)):
atomlist[atom] = atomlist[atom] - point
return atomlist
def rotate_adp_reverse(adp, rotmat):
"""
Rotates the adp with its corresponding rotation matrix.
"""
adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
[float(adp[3]), float(adp[1]), float(adp[5])],
[float(adp[4]), float(adp[5]), float(adp[2])]])
rotmatT = np.transpose(rotmat)
adp = np.dot(rotmat, adp)
adp = np.dot(adp, rotmatT)
adp = np.array(adp).flatten().tolist()
return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
def rotate_adp(adp, rotmat):
"""
Rotates the adp with its corresponding rotation matrix.
"""
adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
[float(adp[3]), float(adp[1]), float(adp[5])],
[float(adp[4]), float(adp[5]), float(adp[2])]])
rotmatT = np.transpose(rotmat)
adp = np.dot(rotmatT, adp)
adp = np.dot(adp, rotmat)
# print '=\n',adp,'\n-------------------------------------------------\n\n\n\n\n\n'
adp = np.array(adp).flatten().tolist()
return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
def rotate_adp2(adp, rotmat, cell):
"""
Rotates the adp with its corresponding rotation matrix.
"""
adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
[float(adp[3]), float(adp[1]), float(adp[5])],
[float(adp[4]), float(adp[5]), float(adp[2])]])
rotmat = np.linalg.inv(rotmat)
rotmatT = np.transpose(rotmat)
Nmat = np.matrix([[1 / cell[0], 0, 0],
[0, 1 / cell[1], 0],
[0, 0, 1 / cell[2]]])
Nmat = np.linalg.inv(Nmat)
NmatT = np.transpose(Nmat)
adp = np.dot(rotmat, adp)
adp = np.dot(adp, rotmatT)
adp = np.dot(Nmat, adp)
adp = np.dot(adp, NmatT)
adp = np.array(adp).flatten().tolist()
return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
def rotate_adp3(adp, rotmat, cell):
"""
Rotates the adp with its corresponding rotation matrix.
"""
adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
[float(adp[3]), float(adp[1]), float(adp[5])],
[float(adp[4]), float(adp[5]), float(adp[2])]])
rotmati = np.matrix(rotmat)
rotmatiT = np.transpose(rotmati)
rotmat = np.linalg.inv(rotmat)
Nmat = np.matrix([[1 / cell[0], 0, 0],
[0, 1 / cell[1], 0],
[0, 0, 1 / cell[2]]])
Nmat = np.linalg.inv(Nmat)
NmatT = np.transpose(Nmat)
adp = np.dot(rotmati, adp)
adp = np.dot(adp, rotmatiT)
adp = np.dot(Nmat, adp)
adp = np.dot(adp, NmatT)
adp = np.array(adp).flatten().tolist()
return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]
def rotate_list_by(coordlist, R):
"""
Returns a list of coordinates where every position is rotated by
    the rotation matrix 'R'.
"""
    for coord in range(len(coordlist)):
value = np.dot(R, coordlist[coord])
value = np.array(value).reshape(-1, ).tolist()
coordlist[coord] = value
return coordlist
def write_xyz(coords, name):
filepointer = open(name, 'w')
filepointer.write(str(len(coords)))
filepointer.write('\n' + name + '\n')
for line in coords:
filepointer.write('C ')
for coord in line:
filepointer.write(str(coord) + ' ')
filepointer.write('\n')
filepointer.close()
def write_xyzqt(coords, name):
filepointer = open(name, 'a')
filepointer.write(name + '\n')
for line in coords:
filepointer.write('C ')
for coord in line:
filepointer.write(' ' + str(coord))
filepointer.write('\n')
filepointer.close()
def get_3drotation_matrix(axis, angle):
"""
Returns the rotation matrix that rotates a vector around the given axis
by the given angle using the "Euler-Rodrigues formula".
"""
angle = angle #*-1
norm = np.linalg.norm(np.array(axis))
if norm > 0:
axis /= norm
ax, ay, az = axis[0], axis[1], axis[2]
cos, sin = np.cos(angle), np.sin(angle)
rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],
[ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],
[az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])
return rotmat
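# Example sketch: a 90 degree rotation about the z-axis,
#   R = get_3drotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
# sends [1, 0, 0] to approximately [0, 1, 0].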
def get_normal_vector_of_plane(p1, p2, p3):
"""
Returns the normal vector of a plane defined by the points p1,p2 and p3.
"""
v12 = np.array(p1) - np.array(p2)
v13 = np.array(p1) - np.array(p3)
nvec = np.cross(v12, v13)
## print 'norm: '+str(np.linalg.norm(nvec))
return nvec / np.linalg.norm(nvec)
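# Example sketch: the unit normal of the plane through (0, 0, 0), (1, 0, 0)
# and (0, 1, 0) is [0, 0, 1].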
def read_gaussian_coords():
atomlist = []
filepointer = open('g98.out', 'r')
for line in filepointer.readlines():
if 'Distance' in line: break
try:
newline = [float(i) for i in line.split(' ') if len(i) > 0]
newline = [newline[:2], np.array(newline[3:])]
atomlist.append(newline)
except:
pass
return atomlist
def get_closest_neighbours(atomlist, neighbours=2):
"""
    Returns a list where every element is a list of three atom names. The second and third
    names are the closest neighbours of the first name.
The argument is a list as returned by frac_to_cart and the number of neighbours to be
returned.
"""
print('atomlist', atomlist)
neighbourlist = []
for atom in atomlist:
listline = [atom[0][0]]
dists = []
distsc = []
for partner in atomlist:
dists.append(np.linalg.norm(atom[1] - partner[1]))
distsc.append(np.linalg.norm(atom[1] - partner[1]))
dists.remove(min(dists))
for _ in range(neighbours):
if min(dists) < 2.5:
listline.append(atomlist[distsc.index(min(dists))][0][0])
dists.remove(min(dists))
#listline.append(atomlist[distsc.index(min(dists))][0][0])
neighbourlist.append(listline)
return neighbourlist
def calculate_distance_matrix(atomlist):
"""
Calculates for every atom the distances to all other atoms
in atomlist.
Returns a list where every element is a list of all distances.
"""
distlist = []
for atom in atomlist:
atomdict = {}
for partner in atomlist:
if not str(int(partner[0][1])) in atomdict.keys():
atomdict[str(int(partner[0][1]))] = []
atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))
else:
atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))
atomdict[str(int(partner[0][1]))].sort()
distlist.append(atomdict)
return distlist
def link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):
"""
The function is able to identify equal atoms of one molecule in different
coordinate systems independent of the molecule's orientaion.
"""
hitlist = []
for atom in distlist1:
atomtype = int(atomlist1[distlist1.index(atom)][0][1])
valuelist = []
for partner in distlist2:
partnertype = int(atomlist2[distlist2.index(partner)][0][1])
if atomtype == partnertype:
partnervalue = 0
keylist = partner.keys()
for key in keylist:
                    for element in range(len(atom[key])):
partnervalue += abs(atom[key][element] - partner[key][element])
else:
partnervalue = 9999999
valuelist.append(partnervalue)
minvalue = min(valuelist)
besthit = valuelist.index(minvalue)
        hitlist.append(besthit)
    return hitlist
def make_list_unique(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
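# Example sketch: make_list_unique([1, 2, 1, 3]) returns [1, 2, 3],
# preserving first-seen order; idfun lets callers deduplicate on a derived
# key, e.g. idfun=str.lower for case-insensitive atom labels.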
def get_influence_atoms(atomlist):
"""
    Determines the atoms defining the chemical environment of a given atom by checking
their bonding partners. Only the first and second neighbours are considered.
"""
enviromentlist = []
trunclist = []
neighbourlist = get_closest_neighbours(atomlist, 4)
for neighbours in neighbourlist:
if neighbours[0][0] == "H":
neighbours = neighbours[:2]
if neighbours[0][0] == "O":
neighbours = neighbours[:3]
trunclist.append(neighbours)
for atom in trunclist:
newatom = []
for atom1partner in atom[1:]:
for partner in trunclist:
if partner[0] == atom1partner:
counter = 0
for atomi in partner:
if atomi[0] == 'H':
counter += 1
if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):
newatom += atom + partner[1:]
newatom = make_list_unique(newatom)
newatom.sort()
enviromentlist.append(newatom)
return enviromentlist
def link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):
"""
The function is able to identify equivalent atoms in different molecules in different
coordinate systems independent of the molecule's orientaion.
"""
hitlist = []
for atom in distlist1:
atomtype = int(atomlist1[distlist1.index(atom)][0][1])
valuelist = []
for partner in distlist2:
partnertype = int(atomlist2[distlist2.index(partner)][0][1])
if atomtype == partnertype:
partnervalue = 0
keylist = partner.keys()
for key in keylist:
                    for element in range(len(atom[key])):
value = abs(atom[key][element] - partner[key][element])
partnervalue += value
else:
partnervalue = 9999999
valuelist.append(partnervalue)
minvalue = min(valuelist)
besthit = valuelist.index(minvalue)
        hitlist.append(besthit)
    return hitlist
def read_multiple_coordinates(fragmentnames):
"""
Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a
    dictionary where every return value of frac_to_cart is keyed to its fragment name.
"""
fragdict = {}
for name in fragmentnames:
path = name + '/'
        cell, pos, keylist = read_coordinates(path)
atomlist = frac_to_cart(cell, pos)
atomdict = {}
for atom in atomlist:
atomdict[atom[0][0]] = atom[1]
fragdict[name] = atomlist
return fragdict
##def read_coordinates(path=''):
## """
## Reads the cell parameters from a 'xd.mas' file and the atomic positions
## from a 'xd.res' file.
## The function returns a list with the cell parameters and an dictionary which
## keys the atom name to its fractional coordinates.
## """
## maspointer=open(path+'xd.mas','r')
## respointer=open(path+'xd.res','r')
## positions={}
## keylist=[] #Needed to keep the atomlist order. This is important for the frequency read function.
## for line in maspointer.readlines():
## if 'CELL' in line:
## cell=[float(i) for i in line.split(" ") if '.' in i]
## for line in respointer.readlines():
## if '(' in line and not '!' in line:
## coords=[float(i) for i in line.split(" ") if '.' in i]
## coords=coords[:-1]
## key=line.split(" ")[0]
## keylist.append(key)
## positions[key]=coords
## sortkeylist=[]
## for i in xrange(len(keylist)):
## j=i+1
## for key in keylist:
## if j==int(key[2:-1]):
## sortkeylist.append(key)
## return cell,positions,sortkeylist
def read_xd_master_file(path, errorpointer):
"""
Returns the compound name and the cell parameters from a xd.mas style
file specified by 'path'.
"""
filepointer = open(path, 'r')
for line in filepointer.readlines():
if 'TITLE' in line:
compound_name = line.partition('!')[2].lstrip().rstrip()
if 'CELL' in line:
cell = [float(i) for i in line.split(" ") if '.' in i]
break
filepointer.close()
try:
return compound_name, cell
except:
errorpointer.write(path + '\n')
return None, None
def read_xd_parameter_file(path, sort=False):
respointer = open(path, 'r')
positions = {}
keylist = []
for line in respointer.readlines():
if '(' in line and not '!' in line:
coords = [float(i) for i in line.split(" ") if '.' in i]
coords = coords[:-1]
key = line.split(" ")[0]
keylist.append(key)
positions[key] = coords
if sort:
sortkeylist = []
        for i in range(len(keylist)):
j = i + 1
for key in keylist:
number = get_number(key)
if j == int(number):
sortkeylist.append(key)
else:
sortkeylist = keylist
return positions, sortkeylist
def read_coordinates(path='', sort=True):
"""
Reads the cell parameters from a 'xd.mas' file and the atomic positions
from a 'xd.res' file.
    The function returns a list with the cell parameters and a dictionary which
keys the atom name to its fractional coordinates.
"""
maspointer = open(path + 'xd.mas', 'r')
respointer = open(path + 'xd.res', 'r')
positions = {}
keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.
for line in maspointer.readlines():
if 'CELL ' in line:
cell = [float(i) for i in line.split(" ") if '.' in i]
break
for line in respointer.readlines():
if '(' in line and not '!' in line:
coords = [float(i) for i in line.split(" ") if '.' in i]
coords = coords[:-1]
key = line.split(" ")[0]
keylist.append(key)
positions[key] = coords
if sort:
sortkeylist = []
        for i in range(len(keylist)):
j = i + 1
for key in keylist:
number = get_number(key)
if j == int(number):
sortkeylist.append(key)
else:
sortkeylist = keylist
return cell, positions, sortkeylist
def get_number(atomname):
"""
Returns the number in the brackets of an atomname.
"""
switch = False
number = ''
for char in atomname:
if char == ')':
switch = False
if switch:
number += char
if char == '(':
switch = True
return number
def frac_to_cart(cell, positions):
"""
Transforms a set of given fractional coordinates to cartesian coordinates.
Needs a list containing the cell parameters as its first argument and the dictionary
    returned by read_coordinates().
    Returns a list of [[atom name, element number], cartesian coordinate array] entries.
"""
atomlist = []
counter = 1
a, b, c = cell[0], cell[1], cell[2]
alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi
v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \
+ 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))
transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)],
[0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],
[0, 0, c * v / np.sin(gamma)]])
for atom in positions:
coordmatrix = np.dot(transmatrix, positions[str(atom)])
coordmatrix = np.array(coordmatrix).flatten().tolist()
atomlist.append([])
atomlist[-1].append([atom, atomtable[atom[0]]])
counter += 1
atomlist[-1].append(np.array(coordmatrix))
return atomlist
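# Example sketch: for an orthorhombic cell [10, 10, 10, 90, 90, 90] the
# transformation matrix is diag(10, 10, 10), so fractional (0.5, 0.5, 0.5)
# becomes cartesian (5.0, 5.0, 5.0). Note that atomtable[atom[0]] keys on
# the first character of the label only, so two-letter elements would need
# xd_element() instead.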
def list_to_dict(atomlist, full=False):
"""
Keys the coordinates of the atoms read from xd.res to the numerical part of its name.
"""
atomdict = {}
if full:
for atom in atomlist:
atomdict[atom[0]] = atom[1]
else:
for atom in atomlist:
atomdict[atom[0][0]] = atom[1]
return atomdict
#===============================================================================
# def link_atoms(gatomlist,xatomdict):
# """
# Returns a list of pairs of equivalten atoms.
# """
# linklist=[]
# keylist=xatomdict.keys()
# for atom in xrange(len(gatomlist)):
# for key in keylist:
# if int(key)==atom+1:
# linklistline=[atomlist[atom][1],xatomdict[key]]
# linklist.append(linklistline)
# break
# return linklist
#===============================================================================
#===============================================================================
# def get_random_plane(linklist):
# """
# Randomly picks three atoms to build a plane from.
# """
# planepoints=random.sample(linklist,3)
# gplanenorm=get_normal_vector_of_plane(planepoints[0][0],planepoints[1][0],planepoints[2][0])
# gplanedir=np.linalg.norm(planepoints[0][0]-planepoints[1][0])
# xplanenorm=get_normal_vector_of_plane(planepoints[0][1],planepoints[1][1],planepoints[2][1])
# xdplanedir=np.linalg.norm(planepoints[0][1]-planepoints[1][1])
# return gplanenorm,xplanenorm
#===============================================================================
def get_angle(v1, v2):
"""
Returns the angle between two vectors.
"""
return np.arccos(np.dot(v1, v2))
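# Example sketch: get_angle() assumes unit vectors; for v1 = [1, 0, 0] and
# v2 = [0, 1, 0] it returns arccos(0) = pi/2.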
def read_invout_database(path):
path += 'Invariome.out'
filepointer = open(path, 'r')
invnames = {}
for line in filepointer.readlines():
splitted = line.split(' ')
invnames[splitted[0][:-1]] = splitted[1][:-1]
return invnames
|
normal
|
{
"blob_id": "27e685750e5caa2f80c5a6399b07435ee9aa9fb9",
"index": 7936,
"step-1": "<mask token>\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\n<mask token>\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]], [adp[3], adp[1], adp[5]], [\n adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value\n ]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\n<mask token>\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return float(covalence_radius[element1]) + float(covalence_radius[element2]\n ) - 0.08 * float(abs(electro_negativ[element1] - electro_negativ[\n element2])) - distance\n\n\n<mask token>\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef get_framework_neighbours(atom, useH=True):\n \"\"\"\n Needs a ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius\n [atom.element]) + float(covalence_radius[atom2.element]) + 0.1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\n<mask token>\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\n<mask token>\n\n\ndef 
rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\n<mask token>\n\n\ndef write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) -\n az * sin, ax * az * (1 - cos) + ay * sin], [ay * ax * (1 - cos) +\n az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax *\n sin], [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax *\n sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line:\n break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. 
The second and third\n names are the closest neighbours of the first names.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n neighbourlist.append(listline)\n return neighbourlist\n\n\n<mask token>\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[\n key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\n<mask token>\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2,\n keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every returnvalue of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\n<mask token>\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.' 
in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\n<mask token>\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\n<mask token>\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of its name.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n return atomdict\n\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle between two vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\ndef Uiso(adp, mean='geometric'):\n try:\n adp = get_adp_as_matrix(adp)\n eigvals = np.linalg.eigvals(adp)\n if mean == 'geometric':\n return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (\n 1.0 / 3.0)\n elif mean == 'arithmetic':\n return sum(eigvals) / 3.0\n else:\n print(\n \"crystgeom: Error: please specify mean as 'geometric' or 'arithmetic'\"\n )\n exit()\n except:\n return adp\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]], [adp[3], adp[1], adp[5]], [\n adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value\n ]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\ndef center_molecule(atom_coords):\n center = get_geom_center(atom_coords)\n atom_coords = move_center_to_point(atom_coords, center)\n return atom_coords\n\n\ndef get_pair_list(atom_elements_1, atom_coords_1, atom_elements_2,\n atom_coords_2):\n pair_list = []\n for i in xrange(len(atom_coords_1)):\n best_hit = 9, None\n for j in xrange(len(atom_coords_2)):\n dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])\n if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:\n best_hit = dist, j\n pair_list.append(best_hit[1])\n return pair_list\n\n\ndef bond_order(bondxi, threshold_single_meso=0.0847, threshold_meso_double=\n 0.0847, threshold_double_triple=0.27):\n \"\"\"\n Returns the bond order between two atoms.\n \"\"\"\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order\n\n\n<mask token>\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return float(covalence_radius[element1]) + float(covalence_radius[element2]\n ) - 0.08 * float(abs(electro_negativ[element1] - electro_negativ[\n element2])) - distance\n\n\n<mask token>\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef get_framework_neighbours(atom, 
useH=True):\n \"\"\"\n Needs a ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius\n [atom.element]) + float(covalence_radius[atom2.element]) + 0.1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\ndef read_meas_adp(data, path='xd.res', use='meas'):\n \"\"\"\n Reads the measured ADP from the xd.res file.\n The parameters are stored in atom.adp['frac_meas'] and\n atom.adp['cart_meas']\n \"\"\"\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print(\n 'WARNING!!! Inconsistend number of floats while reading measured ADP.'\n )\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.\n frac2cartmatrix, atom.molecule.cell)\n return data\n\n\n<mask token>\n\n\ndef get_best_quaternion(coordlist1, coordlist2):\n \"\"\"\n Determines the the quaternion representing the best possible\n transformation of two coordinate systems into each other using\n a least sqare approach.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n N = np.matrix([[N11, N12, N13, N14], [N21, N22, N23, N24], [N31, N32,\n N33, N34], [N41, N42, N43, N44]])\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1).tolist()\n return quat, max(w)\n\n\n<mask token>\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], 
adp[5]]\n\n\n<mask token>\n\n\ndef rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp3(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_list_by(coordlist, R):\n \"\"\"\n Returns a list of coordinates where every position is rotated by\n the rotation matrix 'R'.\n \"\"\"\n for coord in xrange(len(coordlist)):\n value = np.dot(R, coordlist[coord])\n value = np.array(value).reshape(-1).tolist()\n coordlist[coord] = value\n return coordlist\n\n\ndef write_xyz(coords, name):\n filepointer = open(name, 'w')\n filepointer.write(str(len(coords)))\n filepointer.write('\\n' + name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(str(coord) + ' ')\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) -\n az * sin, ax * az * (1 - cos) + ay * sin], [ay * ax * (1 - cos) +\n az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax *\n sin], [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax *\n sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line:\n break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n
 newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. The second and third\n names are the closest neighbours of the first name.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n neighbourlist.append(listline)\n return neighbourlist\n\n\n<mask token>\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientation.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[\n key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n return hitlist\n\n\n<mask token>\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2,\n keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientation.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n return hitlist\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every return value of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\n<mask token>\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.'
 in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\n<mask token>\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\n<mask token>\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of their names.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n return atomdict\n\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle in radians between two unit vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n",
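# Editor's note: illustrative sketch only, not part of the original record.
# It shows how get_number() drives the sort=True branch of
# read_xd_parameter_file(); the atom labels below are made-up examples.
labels = ['C(2)', 'O(1)', 'H(3)']
print(get_number('C(2)'))  # -> '2', the digits between the brackets
ordered = sorted(labels, key=lambda name: int(get_number(name)))
# ordered == ['O(1)', 'C(2)', 'H(3)'], the same order the nested loop builds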
"step-3": "<mask token>\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\ndef Uiso(adp, mean='geometric'):\n try:\n adp = get_adp_as_matrix(adp)\n eigvals = np.linalg.eigvals(adp)\n if mean == 'geometric':\n return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (\n 1.0 / 3.0)\n elif mean == 'arithmetic':\n return sum(eigvals) / 3.0\n else:\n print(\n \"crystgeom: Error: please specify mean as 'geometric' or 'arithmetic'\"\n )\n exit()\n except:\n return adp\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]], [adp[3], adp[1], adp[5]], [\n adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value\n ]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\ndef center_molecule(atom_coords):\n center = get_geom_center(atom_coords)\n atom_coords = move_center_to_point(atom_coords, center)\n return atom_coords\n\n\ndef get_pair_list(atom_elements_1, atom_coords_1, atom_elements_2,\n atom_coords_2):\n pair_list = []\n for i in xrange(len(atom_coords_1)):\n best_hit = 9, None\n for j in xrange(len(atom_coords_2)):\n dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])\n if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:\n best_hit = dist, j\n pair_list.append(best_hit[1])\n return pair_list\n\n\ndef bond_order(bondxi, threshold_single_meso=0.0847, threshold_meso_double=\n 0.0847, threshold_double_triple=0.27):\n \"\"\"\n Returns the bond order between two atoms.\n \"\"\"\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order\n\n\ndef rotate_3D(atom, source_atom):\n \"\"\"\n Rotates the ADP of 'atom' to match the orientation\n of 'source_atom.\n \"\"\"\n from lauescript.cryst.match import get_transform\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.\n orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n matrix = get_transform(lst1, lst2, matrix=True)\n adp = source_atom.adp['cart_int']\n atom.adp['cart_int'] = rotate_adp(adp, matrix)\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return float(covalence_radius[element1]) + float(covalence_radius[element2]\n ) - 0.08 * float(abs(electro_negativ[element1] - electro_negativ[\n element2])) - distance\n\n\ndef get_orientation_vector(atom1, atom2):\n v = atom1.cart - atom2.cart\n 
return v / np.linalg.norm(v)\n\n\n<mask token>\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef get_framework_neighbours(atom, useH=True):\n \"\"\"\n Needs an ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius\n [atom.element]) + float(covalence_radius[atom2.element]) + 0.1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\ndef read_meas_adp(data, path='xd.res', use='meas'):\n \"\"\"\n Reads the measured ADP from the xd.res file.\n The parameters are stored in atom.adp['frac_meas'] and\n atom.adp['cart_meas']\n \"\"\"\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print(\n 'WARNING!!! Inconsistent number of floats while reading measured ADP.'\n )\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.\n frac2cartmatrix, atom.molecule.cell)\n return data\n\n\n<mask token>\n\n\ndef get_adp_from_calc(vx, vy, vz):\n \"\"\"\n Calculates an ADP in its matrix representation from the three\n principal axes representing the displacement ellipsoid.\n\n The three principal axes of the ellipsoid are needed as arguments.\n A matrix representation of the ADP is returned.\n \"\"\"\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0], [0, ly, 0], [0, 0, lz]])\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]], [Vx[1], Vy[1], Vz[1]], [Vx[2], Vy\n [2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n M = np.dot(np.dot(Vinv, L), V)\n return M\n\n\ndef get_best_quaternion(coordlist1, coordlist2):\n \"\"\"\n Determines the quaternion representing the best possible\n transformation of two coordinate systems into each other using\n a least squares approach.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = 
float(N14)\n N42 = float(N24)\n N43 = float(N34)\n N = np.matrix([[N11, N12, N13, N14], [N21, N22, N23, N24], [N31, N32,\n N33, N34], [N41, N42, N43, N44]])\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1).tolist()\n return quat, max(w)\n\n\n<mask token>\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\n<mask token>\n\n\ndef rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp3(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_list_by(coordlist, R):\n \"\"\"\n Returns a list of coordinates where every position is rotated by\n the the rotation matrix 'R'.\n \"\"\"\n for coord in xrange(len(coordlist)):\n value = np.dot(R, coordlist[coord])\n value = np.array(value).reshape(-1).tolist()\n coordlist[coord] = value\n return coordlist\n\n\ndef write_xyz(coords, name):\n filepointer = open(name, 'w')\n filepointer.write(str(len(coords)))\n filepointer.write('\\n' + name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(str(coord) + ' ')\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef 
get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) -\n az * sin, ax * az * (1 - cos) + ay * sin], [ay * ax * (1 - cos) +\n az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax *\n sin], [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax *\n sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line:\n break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. The second and third\n names are the closest neighbours of the first names.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n neighbourlist.append(listline)\n return neighbourlist\n\n\ndef calculate_distance_matrix(atomlist):\n \"\"\"\n Calculates for every atom the distances to all other atoms\n in atomlist.\n Returns a list where every element is a list of all distances.\n \"\"\"\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(\n atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(\n atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n distlist.append(atomdict)\n return distlist\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[\n key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n 
hitlist.append(besthit)\n return hitlist\n\n\ndef make_list_unique(seq, idfun=None):\n if idfun is None:\n\n def idfun(x):\n return x\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n return result\n\n\n<mask token>\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2,\n keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientation.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n return hitlist\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every return value of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\ndef read_xd_master_file(path, errorpointer):\n \"\"\"\n Returns the compound name and the cell parameters from an xd.mas style\n file specified by 'path'.\n \"\"\"\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.' in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\ndef read_coordinates(path='', sort=True):\n \"\"\"\n Reads the cell parameters from a 'xd.mas' file and the atomic positions\n from a 'xd.res' file.\n The function returns a list with the cell parameters and a dictionary which\n keys the atom name to its fractional coordinates.\n \"\"\"\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n positions = {}\n keylist = []\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.'
 in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\n<mask token>\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of their names.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n return atomdict\n\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle in radians between two unit vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n",
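# Editor's note: minimal sketch (arbitrary demo numbers) of the packed-ADP
# convention used by the functions above: a six-element list stores the
# symmetric displacement tensor as [U11, U22, U33, U12, U13, U23].
adp = [0.02, 0.03, 0.04, 0.001, 0.002, 0.003]
U = get_adp_as_matrix(adp)  # symmetric 3x3 matrix with U[0, 1] == 0.001
u_iso = Uiso(adp)  # geometric mean of the eigenvalues by default
roundtrip = rotate_adp_reverse(adp, np.identity(3))
# np.allclose(roundtrip, adp) holds: a rotation by the identity matrix
# returns the same packed list, confirming the element ordering.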
"step-4": "<mask token>\nimport numpy as np\natomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O':\n 8, 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15,\n 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V':\n 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': \n 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36}\ncovalence_radius = {'H': 0.37, 'He': 0.0, 'Li': 1.23, 'Be': 0.9, 'B': 0.8,\n 'C': 0.77, 'N': 0.74, 'O': 0.71, 'F': 0.72, 'Ne': 0.0, 'Na': 1.54, 'Mg':\n 1.36, 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': 0.99, 'Ar': \n 0.0, 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22, 'Cr': \n 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15, 'Cu': 1.17, 'Zn':\n 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.2, 'Se': 1.16, 'Br': 1.14, 'Kr': \n 0.0, 'Rb': 2.18}\nelectro_negativ = {'H': 2.2, 'He': 5.5, 'Li': 0.97, 'Be': 1.47, 'B': 2.01,\n 'C': 2.5, 'N': 3.07, 'O': 3.5, 'F': 4.4, 'Ne': 4.8, 'Na': 1.01, 'Mg': \n 1.23, 'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': \n 3.2, 'K': 0.91, 'Ca': 1.04, 'Sc': 1.2, 'Ti': 1.32, 'V': 1.45, 'Cr': \n 1.56, 'Mn': 1.6, 'Fe': 1.64, 'Co': 1.7, 'Ni': 1.75, 'Cu': 1.75, 'Zn': \n 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.2, 'Se': 2.48, 'Br': 2.74, 'Kr': \n 2.9, 'Rb': 0.89}\nproton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B':\n '005', 'C': '006', 'N': '007', 'O': '008', 'F': '009', 'Ne': '010',\n 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015', 'S':\n '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021',\n 'Ti': '022', 'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co':\n '027', 'Ni': '028', 'Cu': '029', 'Zn': '030', 'Ga': '031', 'Ge': '032',\n 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'}\nnumber_proton = dict([[v, k] for k, v in proton_number.items()])\npriority = {'3': '5', '2': '4', '1.5': '3', '6': '2', '5': '1', '1': '0'}\n\n\ndef frac2cart(coords, matrix):\n coords = np.dot(matrix, coords).flatten().tolist()[0]\n return coords\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\ndef Uiso(adp, mean='geometric'):\n try:\n adp = get_adp_as_matrix(adp)\n eigvals = np.linalg.eigvals(adp)\n if mean == 'geometric':\n return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (\n 1.0 / 3.0)\n elif mean == 'arithmetic':\n return sum(eigvals) / 3.0\n else:\n print(\n \"crystgeom: Error: please specify mean as 'geometric' or 'arithmetic'\"\n )\n exit()\n except:\n return adp\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]], [adp[3], adp[1], adp[5]], [\n adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' 
in value\n ]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\ndef center_molecule(atom_coords):\n center = get_geom_center(atom_coords)\n atom_coords = move_center_to_point(atom_coords, center)\n return atom_coords\n\n\ndef get_pair_list(atom_elements_1, atom_coords_1, atom_elements_2,\n atom_coords_2):\n pair_list = []\n for i in xrange(len(atom_coords_1)):\n best_hit = 9, None\n for j in xrange(len(atom_coords_2)):\n dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])\n if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:\n best_hit = dist, j\n pair_list.append(best_hit[1])\n return pair_list\n\n\ndef bond_order(bondxi, threshold_single_meso=0.0847, threshold_meso_double=\n 0.0847, threshold_double_triple=0.27):\n \"\"\"\n Returns the bond order between two atoms.\n \"\"\"\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order\n\n\ndef rotate_3D(atom, source_atom):\n \"\"\"\n Rotates the ADP of 'atom' to match the orientation\n of 'source_atom.\n \"\"\"\n from lauescript.cryst.match import get_transform\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.\n orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n matrix = get_transform(lst1, lst2, matrix=True)\n adp = source_atom.adp['cart_int']\n atom.adp['cart_int'] = rotate_adp(adp, matrix)\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return float(covalence_radius[element1]) + float(covalence_radius[element2]\n ) - 0.08 * float(abs(electro_negativ[element1] - electro_negativ[\n element2])) - distance\n\n\ndef get_orientation_vector(atom1, atom2):\n v = atom1.cart - atom2.cart\n return v / np.linalg.norm(v)\n\n\ndef framework_crawler(atom, direction, rigid_group_old=None):\n \"\"\"\n Function to identify atoms belonging to a previosly defined rigid\n group.\n Arguments:\n atom: the name of the first atom of the rigid group.\n direction: the name of the second atom of the rigid group.\n rigid_group_old: used by the function itself for consecutive calls.\n\n Returns a list of atom names belonging to the rigid group.\n \"\"\"\n if not rigid_group_old:\n rigid_group = [atom, direction]\n else:\n rigid_group = rigid_group_old\n for atom in get_framework_neighbours(direction):\n if not atom in rigid_group and not atom.element == 'H':\n rigid_group.append(atom)\n framework_crawler(rigid_group[0], atom, rigid_group)\n if not rigid_group_old:\n return rigid_group\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef 
get_framework_neighbours(atom, useH=True):\n \"\"\"\n Needs a ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius\n [atom.element]) + float(covalence_radius[atom2.element]) + 0.1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\ndef read_meas_adp(data, path='xd.res', use='meas'):\n \"\"\"\n Reads the measured ADP from the xd.res file.\n The parameters are stored in atom.adp['frac_meas'] and\n atom.adp['cart_meas']\n \"\"\"\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print(\n 'WARNING!!! Inconsistend number of floats while reading measured ADP.'\n )\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.\n frac2cartmatrix, atom.molecule.cell)\n return data\n\n\ndef reflect_adp(adp, planev):\n \"\"\"\n Returns the ADP after reflection on the plane defined by its normal\n vector 'planev'.\n \"\"\"\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = 2.0 * np.dot(np.array([0, 0, 0]), planev) * planev\n return rotate_adp(adp, M[:3, :3])\n\n\ndef eigenv2tensor(axis):\n \"\"\"\n Calculates the tensor representation of ADP from its priciple axis.\n \"\"\"\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp\n\n\ndef get_adp_from_calc(vx, vy, vz):\n \"\"\"\n Calculates an ADP in its matrix representation from the three\n principle axis representing the displacement ellipsoid.\n\n The three principle axis of the ellipsoid are needed as arguments.\n A Matrix representation of the ADP is returned.\n \"\"\"\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0], [0, ly, 0], [0, 0, lz]])\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]], [Vx[1], Vy[1], Vz[1]], [Vx[2], Vy\n [2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n M = np.dot(np.dot(Vinv, L), V)\n return M\n\n\ndef get_best_quaternion(coordlist1, coordlist2):\n \"\"\"\n Determines the the quaternion representing the best possible\n transformation of two coordinate systems into each other using\n a least sqare approach.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 
2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n N = np.matrix([[N11, N12, N13, N14], [N21, N22, N23, N24], [N31, N32,\n N33, N34], [N41, N42, N43, N44]])\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1).tolist()\n return quat, max(w)\n\n\ndef get_rotation_matrix_from_quaternion(q):\n \"\"\"\n Returns the rotation matrix equivalent of the given quaternion.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], \n 2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])],\n [2 * (q[2] * q[1] + q[0] * q[3]), q[0] * q[0] - q[1] * q[1] + q[2] *\n q[2] - q[3] * q[3], 2 * (q[2] * q[3] - q[0] * q[1])], [2 * (q[3] *\n q[1] - q[0] * q[2]), 2 * (q[3] * q[2] + q[0] * q[1]), q[0] * q[0] -\n q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])\n return R\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp3(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = 
np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_list_by(coordlist, R):\n \"\"\"\n Returns a list of coordinates where every position is rotated by\n the the rotation matrix 'R'.\n \"\"\"\n for coord in xrange(len(coordlist)):\n value = np.dot(R, coordlist[coord])\n value = np.array(value).reshape(-1).tolist()\n coordlist[coord] = value\n return coordlist\n\n\ndef write_xyz(coords, name):\n filepointer = open(name, 'w')\n filepointer.write(str(len(coords)))\n filepointer.write('\\n' + name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(str(coord) + ' ')\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) -\n az * sin, ax * az * (1 - cos) + ay * sin], [ay * ax * (1 - cos) +\n az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax *\n sin], [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax *\n sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line:\n break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. 
The second and third\n names are the closest neighbours of the first name.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n neighbourlist.append(listline)\n return neighbourlist\n\n\ndef calculate_distance_matrix(atomlist):\n \"\"\"\n Calculates for every atom the distances to all other atoms\n in atomlist.\n Returns a list where every element is a list of all distances.\n \"\"\"\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(\n atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(\n atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n distlist.append(atomdict)\n return distlist\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientation.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[\n key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n return hitlist\n\n\ndef make_list_unique(seq, idfun=None):\n if idfun is None:\n\n def idfun(x):\n return x\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n return result\n\n\ndef get_influence_atoms(atomlist):\n \"\"\"\n Determines the atoms defining the chemical environment of a given atom by checking\n their bonding partners.
 Only the first and second neighbours are considered.\n \"\"\"\n environmentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == 'H':\n neighbours = neighbours[:2]\n if neighbours[0][0] == 'O':\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n if counter < 2 or partner[0] in atom and atom[0][0] == 'H':\n newatom += atom + partner[1:]\n newatom = make_list_unique(newatom)\n newatom.sort()\n environmentlist.append(newatom)\n return environmentlist\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2,\n keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientation.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n return hitlist\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every return value of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\ndef read_xd_master_file(path, errorpointer):\n \"\"\"\n Returns the compound name and the cell parameters from an xd.mas style\n file specified by 'path'.\n \"\"\"\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.'
 in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\ndef frac_to_cart(cell, positions):\n \"\"\"\n Transforms a set of given fractional coordinates to Cartesian coordinates.\n Needs a list containing the cell parameters as its first argument and the dictionary\n returned by read_coordinates().\n Returns a list of atoms with Cartesian coordinates analogous to the fractional dictionary.\n \"\"\"\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5\n ] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(\n beta) - np.cos(gamma) * np.cos(gamma) + 2 * np.cos(alpha) * np.cos(\n beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)], [0, \n b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma\n )) / np.sin(gamma)], [0, 0, c * v / np.sin(gamma)]])\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of their names.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n return atomdict\n\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle in radians between two unit vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n",
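# Editor's note: illustrative sketch of xi() and bond_order() from the block
# above; the 1.40 Angstroem C-C distance is an example value, not taken from
# any structure in this record.
b = xi('C', 'C', 1.40)  # 0.77 + 0.77 - 0.08 * |2.50 - 2.50| - 1.40 = 0.14
order = bond_order(b)  # 0.0847 <= 0.14 < 0.27 -> order '2'
# With the default thresholds (threshold_single_meso == threshold_meso_double
# == 0.0847) the '1.5' branch can never fire; pass a larger
# threshold_meso_double to classify meso bonds.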
"step-5": "\"\"\"\nCreated on Feb 10, 2013\n\n@author: jens\n\nDeprecated module for crystallogrphy related geometry operations. And a lot\nof other stuff that I put here.\n\"\"\"\n\nimport numpy as np\n\n\natomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8,\n 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15,\n 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22,\n 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,\n 'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36}\n\ncovalence_radius = {'H': .37, 'He': .0, 'Li': 1.23, 'Be': .90, 'B': .80, 'C': .77,\n 'N': .74, 'O': .71, 'F': .72, 'Ne': 0., 'Na': 1.54, 'Mg': 1.36,\n 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': .99, 'Ar': 0.,\n 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22,\n 'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15,\n 'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20,\n 'Se': 1.16, 'Br': 1.14, 'Kr': 0.,\n 'Rb': 2.18} # , 191, 162, 145, 134, 130, 127, 125, 125, 128, 134, 148, 144, 141, 140, 136, 133, 0, 235, 198, 169, 165, 165, 164, 164, 162, 185, 161, 159, 159, 157, 157, 156, 170, 156, 144, 134, 130, 128, 126, 127, 130, 134, 149, 148, 147, 146, 146, 145, 0, 0, 0, 188, 165, 161, 142, 130, 151, 182, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\nelectro_negativ = {'H': 2.20, 'He': 5.50, 'Li': .97, 'Be': 1.47, 'B': 2.01, 'C': 2.50,\n 'N': 3.07, 'O': 3.50, 'F': 4.40, 'Ne': 4.80, 'Na': 1.01, 'Mg': 1.23,\n 'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': 3.20,\n 'K': .91, 'Ca': 1.04, 'Sc': 1.20, 'Ti': 1.32, 'V': 1.45,\n 'Cr': 1.56, 'Mn': 1.60, 'Fe': 1.64, 'Co': 1.70, 'Ni': 1.75,\n 'Cu': 1.75, 'Zn': 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.20,\n 'Se': 2.48, 'Br': 2.74, 'Kr': 2.90,\n 'Rb': .89} # , 99, 111, 122, 123, 130, 136, 142, 145, 130, 142, 146, 149, 172, 182, 201, 221, 240, 86, 97, 108, 108, 107, 107, 107, 107, 110, 111, 110, 110, 110, 111, 111, 106, 114, 123, 133, 140, 146, 152, 155, 142, 142, 144, 144, 155, 167 }\n\nproton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B': '005', 'C': '006', 'N': '007', 'O': '008',\n 'F': '009', 'Ne': '010', 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015',\n 'S': '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021', 'Ti': '022',\n 'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co': '027', 'Ni': '028', 'Cu': '029',\n 'Zn': '030', 'Ga': '031', 'Ge': '032', 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'}\n\nnumber_proton = dict([[v, k] for k, v in proton_number.items()])\n\npriority = {'3': '5',\n '2': '4',\n '1.5': '3',\n '6': '2',\n '5': '1',\n '1': '0'}\n\n\ndef frac2cart(coords, matrix):\n coords = np.dot(matrix, coords).flatten().tolist()[0]\n return coords\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\ndef Uiso(adp, mean='geometric'):\n try:\n adp = get_adp_as_matrix(adp)\n eigvals = np.linalg.eigvals(adp)\n if mean == 'geometric':\n return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (1. 
/ 3.)\n elif mean == 'arithmetic':\n return sum(eigvals) / 3.\n else:\n print('crystgeom: Error: please specify mean as \\'geometric\\' or \\'arithmetic\\'')\n exit()\n except:\n return adp\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]],\n [adp[3], adp[1], adp[5]],\n [adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\ndef center_molecule(atom_coords):\n center = get_geom_center(atom_coords)\n atom_coords = move_center_to_point(atom_coords, center)\n return atom_coords\n\n\ndef get_pair_list(atom_elements_1, atom_coords_1,\n atom_elements_2, atom_coords_2):\n pair_list = []\n for i in xrange(len(atom_coords_1)):\n best_hit = (9, None)\n for j in xrange(len(atom_coords_2)):\n dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])\n if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:\n best_hit = (dist, j)\n pair_list.append(best_hit[1])\n # ===========================================================================\n # print\n # for i in xrange(len(pair_list)):\n # print atom_atoms_1[i],atom_atoms_2[pair_list[i]]\n #===========================================================================\n return pair_list\n\n\ndef bond_order(bondxi,\n threshold_single_meso=0.0847,\n # ================================================================\n # threshold_meso_double=0.184,\n #================================================================\n threshold_meso_double=0.0847,\n threshold_double_triple=0.27):\n \"\"\"\n Returns the bond order between two atoms.\n \"\"\"\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order\n\n\n# ===============================================================================\n# def rotate_3D_symmetric(atom,source_atom):\n# '''\n# Rotates the ADP of 'atom' to match the orientation\n# of 'source_atom.\n# '''\n# cosangle=np.dot(atom.orientation[0],source_atom.orientation[0])\n# angle=np.arccos(cosangle)\n# axis=np.cross(atom.orientation[0],source_atom.orientation[0])\n# axis=axis/np.linalg.norm(axis)\n# matrix=get_3drotation_matrix(axis,angle)\n# orientation0_new=np.dot(source_atom.orientation[0],matrix)\n# if np.linalg.norm(orientation0_new-atom.orientation[0])<0.00001:\n# pass\n# else:\n# angle=angle*-1\n# matrix=get_3drotation_matrix(axis,angle)\n#\n# atom.adp['cart_int']=rotate_adp(source_atom.adp['cart_int'],matrix)\n#===============================================================================\n\n\n\n\ndef rotate_3D(atom, 
source_atom):\n \"\"\"\n Rotates the ADP of 'atom' to match the orientation\n of 'source_atom.\n \"\"\"\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return (float(covalence_radius[element1]) + float(covalence_radius[element2]) -\n (0.08 * float(abs(electro_negativ[element1] - electro_negativ[element2]))) - distance)\n\n\ndef get_orientation_vector(atom1, atom2):\n v = atom1.cart - atom2.cart\n return v / np.linalg.norm(v)\n\n\ndef framework_crawler(atom, direction, rigid_group_old=None):\n \"\"\"\n Function to identify atoms belonging to a previosly defined rigid\n group.\n Arguments:\n atom: the name of the first atom of the rigid group.\n direction: the name of the second atom of the rigid group.\n rigid_group_old: used by the function itself for consecutive calls.\n\n Returns a list of atom names belonging to the rigid group.\n \"\"\"\n if not rigid_group_old:\n rigid_group = [atom, direction]\n else:\n rigid_group = rigid_group_old\n for atom in get_framework_neighbours(direction):\n if not atom in rigid_group and not atom.element == 'H':\n rigid_group.append(atom)\n framework_crawler(rigid_group[0], atom, rigid_group)\n if not rigid_group_old:\n #=======================================================================\n # print ' Determined rigid group:', [i.name for i in rigid_group]\n #=======================================================================\n return rigid_group\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef get_framework_neighbours(atom, useH=True):\n \"\"\"\n Needs a ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\n#===============================================================================\n# def get_framework_neighbours(atom,useH=True):\n# \"\"\"\n# Needs a classes.atom instance as argument.\n# Returns the names of the framework atoms bound to that atom.\n# \"\"\"\n# neighbourlist=[]\n# for atom2 in atom.partner[atom.molecule.name][1:5]:\n# #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n# if np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n# if not 'H(' in atom2.name or useH:\n# neighbourlist.append(atom2)\n# return neighbourlist\n#===============================================================================\n\ndef read_meas_adp(data, path='xd.res', use='meas'):\n \"\"\"\n 
Reads the measured ADP from the xd.res file.\n The parameters are stored in atom.adp['frac_meas'] and\n atom.adp['cart_meas']\n \"\"\"\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print('WARNING!!! Inconsistend number of floats while\\\n reading measured ADP.')\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n # if use == 'cart_neut': print(atom)\n atom.adp[use] = rotate_adp2(atom.adp[use2],\n atom.molecule.frac2cartmatrix,\n atom.molecule.cell)\n return data\n\n\ndef reflect_adp(adp, planev):\n \"\"\"\n Returns the ADP after reflection on the plane defined by its normal\n vector 'planev'.\n \"\"\"\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])\n\n\ndef eigenv2tensor(axis):\n \"\"\"\n Calculates the tensor representation of ADP from its priciple axis.\n \"\"\"\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n #print v\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp\n\n\ndef get_adp_from_calc(vx, vy, vz):\n \"\"\"\n Calculates an ADP in its matrix representation from the three\n principle axis representing the displacement ellipsoid.\n\n The three principle axis of the ellipsoid are needed as arguments.\n A Matrix representation of the ADP is returned.\n \"\"\"\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M\n\n\n#===============================================================================\n#\n#\n# def get_general_distances(coordlist1,coordlist2,atomlist1,atomlist2):\n# \"\"\"\n# Calculates a distance dictionary between two sets of atoms.\n# Returns a dictionary entry for every atom in atomlist1 with the inter atom\n# distances and the corresponding atom name keyed to their atom type.\n#\n# This function is used by the get_best_point() function.\n# \"\"\"\n# maindict={}\n# for i in xrange(len(atomlist1)):\n# distdict={}\n# for j in xrange(len(atomlist2)):\n# if not atomlist2[j][0] in distdict.keys():\n# distdict[atomlist2[j][0]]=[[np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]]]\n# else:\n# distdict[atomlist2[j][0]].append([np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]])\n# ## print atomlist1[i],'aaaaaaaaaaa'\n# maindict[atomlist1[i]]=distdict\n# return maindict\n#===============================================================================\n\n\n\ndef get_best_quaternion(coordlist1, coordlist2):\n \"\"\"\n Determines the the quaternion representing the best possible\n transformation of two coordinate systems into each other using\n a least sqare approach.\n\n This function is used by the get_refined_rotation() function.\n 
\"\"\"\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n\n N = np.matrix([[N11, N12, N13, N14],\n [N21, N22, N23, N24],\n [N31, N32, N33, N34],\n [N41, N42, N43, N44]])\n\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1, ).tolist()\n return quat, max(w)\n\n\ndef get_rotation_matrix_from_quaternion(q):\n \"\"\"\n Returns the rotation matrix equivalent of the given quaternion.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],\n 2 * (q[1] * q[2] - q[0] * q[3]),\n 2 * (q[1] * q[3] + q[0] * q[2])],\n [2 * (q[2] * q[1] + q[0] * q[3]),\n q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],\n 2 * (q[2] * q[3] - q[0] * q[1])],\n [2 * (q[3] * q[1] - q[0] * q[2]),\n 2 * (q[3] * q[2] + q[0] * q[1]),\n q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])\n return R\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n # print '=\\n',adp,'\\n-------------------------------------------------\\n\\n\\n\\n\\n\\n'\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 
0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp3(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_list_by(coordlist, R):\n \"\"\"\n Returns a list of coordinates where every position is rotated by\n the the rotation matrix 'R'.\n \"\"\"\n for coord in xrange(len(coordlist)):\n value = np.dot(R, coordlist[coord])\n value = np.array(value).reshape(-1, ).tolist()\n coordlist[coord] = value\n return coordlist\n\n\ndef write_xyz(coords, name):\n filepointer = open(name, 'w')\n filepointer.write(str(len(coords)))\n filepointer.write('\\n' + name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(str(coord) + ' ')\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line: break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. 
The second and third\n names are the closest neighbours of the first names.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist\n\n\ndef calculate_distance_matrix(atomlist):\n \"\"\"\n Calculates for every atom the distances to all other atoms\n in atomlist.\n Returns a list where every element is a list of all distances.\n \"\"\"\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef make_list_unique(seq, idfun=None):\n if idfun is None:\n def idfun(x): return x\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n\n if marker in seen: continue\n seen[marker] = 1\n result.append(item)\n return result\n\n\ndef get_influence_atoms(atomlist):\n \"\"\"\n Determines the atoms defining the chemical enviroment of a given atom by checking\n their bonding partners. 
Only the first and second neighbours are considered.\n \"\"\"\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every returnvalue of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\n##def read_coordinates(path=''):\n## \"\"\"\n## Reads the cell parameters from a 'xd.mas' file and the atomic positions\n## from a 'xd.res' file.\n## The function returns a list with the cell parameters and an dictionary which\n## keys the atom name to its fractional coordinates.\n## \"\"\"\n## maspointer=open(path+'xd.mas','r')\n## respointer=open(path+'xd.res','r')\n## positions={}\n## keylist=[] #Needed to keep the atomlist order. This is important for the frequency read function.\n## for line in maspointer.readlines():\n## if 'CELL' in line:\n## cell=[float(i) for i in line.split(\" \") if '.' in i]\n## for line in respointer.readlines():\n## if '(' in line and not '!' in line:\n## coords=[float(i) for i in line.split(\" \") if '.' in i]\n## coords=coords[:-1]\n## key=line.split(\" \")[0]\n## keylist.append(key)\n## positions[key]=coords\n## sortkeylist=[]\n## for i in xrange(len(keylist)):\n## j=i+1\n## for key in keylist:\n## if j==int(key[2:-1]):\n## sortkeylist.append(key)\n## return cell,positions,sortkeylist\n\ndef read_xd_master_file(path, errorpointer):\n \"\"\"\n Returns the compound name and the cell parameters from a xd.mas style\n file specified by 'path'.\n \"\"\"\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' 
in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\ndef read_coordinates(path='', sort=True):\n \"\"\"\n Reads the cell parameters from a 'xd.mas' file and the atomic positions\n from a 'xd.res' file.\n The function returns a list with the cell parameters and an dictionary which\n keys the atom name to its fractional coordinates.\n \"\"\"\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\ndef frac_to_cart(cell, positions):\n \"\"\"\n Transforms a set of given fractional coordinates to cartesian coordinates.\n Needs a list containing the cell parameters as its first argument and the dictionary\n returned by read coordinates().\n Returns a dictionary with cartesian coordinates analog to fractional dictionary.\n \"\"\"\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \\\n + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)],\n [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],\n [0, 0, c * v / np.sin(gamma)]])\n\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of its name.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = 
atom[1]\n return atomdict\n\n\n#===============================================================================\n# def link_atoms(gatomlist,xatomdict):\n# \"\"\"\n# Returns a list of pairs of equivalten atoms.\n# \"\"\"\n# linklist=[]\n# keylist=xatomdict.keys()\n# for atom in xrange(len(gatomlist)):\n# for key in keylist:\n# if int(key)==atom+1:\n# linklistline=[atomlist[atom][1],xatomdict[key]]\n# linklist.append(linklistline)\n# break\n# return linklist\n#===============================================================================\n\n#===============================================================================\n# def get_random_plane(linklist):\n# \"\"\"\n# Randomly picks three atoms to build a plane from.\n# \"\"\"\n# planepoints=random.sample(linklist,3)\n# gplanenorm=get_normal_vector_of_plane(planepoints[0][0],planepoints[1][0],planepoints[2][0])\n# gplanedir=np.linalg.norm(planepoints[0][0]-planepoints[1][0])\n# xplanenorm=get_normal_vector_of_plane(planepoints[0][1],planepoints[1][1],planepoints[2][1])\n# xdplanedir=np.linalg.norm(planepoints[0][1]-planepoints[1][1])\n# return gplanenorm,xplanenorm\n#===============================================================================\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle between two vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n\n\n",
"step-ids": [
23,
33,
40,
50,
51
]
}
|
[
23,
33,
40,
50,
51
] |
from rest_framework import serializers
from .models import *
__all__ = (
'CatalogCoinListSerializer', 'CatalogCoinSerializer', 'SeriesListSerializer', 'CoinListSerializer',
'CoinSerializer', 'CountriesListSerializer',
)
class CountriesListSerializer(serializers.ModelSerializer):
class Meta:
model = Country
fields = ('name', 'flag',)
class SeriesListSerializer(serializers.ModelSerializer):
class Meta:
model = Serie
fields = ('name',)
class CatalogCoinListSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogCoin
fields = (
'id', 'face_value', 'currency', 'country', 'year', 'theme', 'mint', 'serie', 'collection', 'exchange',
'wishlist',
)
serie = serializers.SlugRelatedField(slug_field='name', read_only=True)
collection = serializers.IntegerField(read_only=True)
exchange = serializers.IntegerField(read_only=True)
wishlist = serializers.IntegerField(read_only=True)
class CatalogCoinSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogCoin
fields = '__all__'
class CoinListSerializer(serializers.ModelSerializer):
class Meta:
model = Coin
fields = ('id', 'catalog_coin', 'owner', 'status',)
catalog_coin = CatalogCoinListSerializer()
class CoinSerializer(serializers.ModelSerializer):
class Meta:
model = Coin
fields = '__all__'
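# Usage sketch (an illustrative addition, not part of the original module):
# the read-only IntegerFields on CatalogCoinListSerializer are not model
# columns, so the queryset feeding the serializer has to annotate them.
# The reverse accessor name 'coin' and the status labels below are
# assumptions for illustration only.
from django.db.models import Count, Q


def annotated_catalog_coins():
    # Each Count() supplies one of the declared read-only IntegerFields.
    return CatalogCoin.objects.annotate(
        collection=Count('coin', filter=Q(coin__status='collection')),
        exchange=Count('coin', filter=Q(coin__status='exchange')),
        wishlist=Count('coin', filter=Q(coin__status='wishlist')),
    )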
|
normal
|
{
"blob_id": "b77da75b01e96ff89f873f4c5764a62cf68cd576",
"index": 217,
"step-1": "<mask token>\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = 'name', 'flag'\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-3": "<mask token>\n__all__ = ('CatalogCoinListSerializer', 'CatalogCoinSerializer',\n 'SeriesListSerializer', 'CoinListSerializer', 'CoinSerializer',\n 'CountriesListSerializer')\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = 'name', 'flag'\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-4": "from rest_framework import serializers\nfrom .models import *\n__all__ = ('CatalogCoinListSerializer', 'CatalogCoinSerializer',\n 'SeriesListSerializer', 'CoinListSerializer', 'CoinSerializer',\n 'CountriesListSerializer')\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = 'name', 'flag'\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-5": "from rest_framework import serializers\n\nfrom .models import *\n\n__all__ = (\n 'CatalogCoinListSerializer', 'CatalogCoinSerializer', 'SeriesListSerializer', 'CoinListSerializer',\n 'CoinSerializer', 'CountriesListSerializer',\n)\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Country\n fields = ('name', 'flag',)\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Serie\n fields = ('name',)\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n class Meta:\n model = CatalogCoin\n fields = (\n 'id', 'face_value', 'currency', 'country', 'year', 'theme', 'mint', 'serie', 'collection', 'exchange',\n 'wishlist',\n )\n\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Coin\n fields = ('id', 'catalog_coin', 'owner', 'status',)\n\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(0, len(data[0, :]) - 3):
    for j in range(0, len(data[:, 0])):
product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,
i + 3]
if product_hor > max_product_hor:
max_product_hor = product_hor
<|reserved_special_token_0|>
for i in range(0, len(data[:, 0]) - 3):
    for j in range(0, len(data[0, :])):
product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +
3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
<|reserved_special_token_0|>
for j in range(0, len(data[0, :]) - 3):
for i in range(0, len(data[0, :]) - 3):
product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2
] * data[i + 3, j + 3]
if product_dia > max_product_dia:
max_product_dia = product_dia
<|reserved_special_token_0|>
for j in range(0, len(data[0, :]) - 3):
    for i in range(3, len(data[0, :])):
product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2
] * data[i - 3, j + 3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
<|reserved_special_token_0|>
print('The greatest product in the same direction is {}.'.format(int(
max_value)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')
max_product_hor = 0
for i in range(0, len(data[0, :]) - 3):
    for j in range(0, len(data[:, 0])):
product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,
i + 3]
if product_hor > max_product_hor:
max_product_hor = product_hor
max_product_ver = 0
for i in range(0, len(data[:, 0]) - 3):
    for j in range(0, len(data[0, :])):
product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +
3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
max_product_dia = 0
for j in range(0, len(data[0, :]) - 3):
for i in range(0, len(data[0, :]) - 3):
product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2
] * data[i + 3, j + 3]
if product_dia > max_product_dia:
max_product_dia = product_dia
max_product_dia_2 = 0
for j in range(0, len(data[0, :]) - 3):
    for i in range(3, len(data[0, :])):
product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2
] * data[i - 3, j + 3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
max_value = max(max_product_hor, max_product_ver, max_product_dia,
max_product_dia_2)
print('The greatest product in the same direction is {}.'.format(int(
max_value)))
<|reserved_special_token_1|>
import numpy as np
data = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')
max_product_hor = 0
for i in range(0, len(data[0, :]) - 3):
    for j in range(0, len(data[:, 0])):
product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,
i + 3]
if product_hor > max_product_hor:
max_product_hor = product_hor
max_product_ver = 0
for i in range(0, len(data[:, 0]) - 3):
    for j in range(0, len(data[0, :])):
product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +
3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
max_product_dia = 0
for j in range(0, len(data[0, :]) - 3):
for i in range(0, len(data[0, :]) - 3):
product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2
] * data[i + 3, j + 3]
if product_dia > max_product_dia:
max_product_dia = product_dia
max_product_dia_2 = 0
for j in range(0, len(data[0, :]) - 3):
    for i in range(3, len(data[0, :])):
product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2
] * data[i - 3, j + 3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
max_value = max(max_product_hor, max_product_ver, max_product_dia,
max_product_dia_2)
print('The greatest product in the same direction is {}.'.format(int(
max_value)))
<|reserved_special_token_1|>
# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction
# (up, down, left, right, or diagonally) in the 20×20 grid?
import numpy as np
data = np.genfromtxt("problem_11_matrix.txt", delimiter=" ")
# find greatest product horizontally
max_product_hor = 0
for i in range(0, len(data[0, :])-3):
    for j in range(0, len(data[:, 0])):
product_hor = data[j, i] * data[j, i+1] * data[j, i+2] * data[j, i+3]
if product_hor > max_product_hor:
max_product_hor = product_hor
# print("The greatest product horizontally is {}. " .format(max_product_hor))
# find greatest product vertically
max_product_ver = 0
for i in range(0, len(data[:, 0])-3):
    for j in range(0, len(data[0, :])):
product_ver = data[i, j] * data[i+1, j] * data[i+2, j] * data[i+3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
# print("The greatest product vertically is {}. " .format(max_product_ver))
# find greatest product diagonally
max_product_dia = 0
for j in range(0, len(data[0, :])-3):
for i in range(0, len(data[0, :])-3):
product_dia = data[i, j] * data[i+1, j+1] * data[i+2, j+2] * data[i+3, j+3]
if product_dia > max_product_dia:
max_product_dia = product_dia
# print("The greatest product diagonally is {}. " .format(max_product_dia))
max_product_dia_2 = 0
for j in range(0, len(data[0, :])-3):
    for i in range(3, len(data[0, :])):
product_dia_2 = data[i, j] * data[i-1, j+1] * data[i-2, j+2] * data[i-3, j+3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
# print("The greatest product diagonally is {}. " .format(max_product_dia_2))
max_value = max(max_product_hor, max_product_ver, max_product_dia, max_product_dia_2)
print("The greatest product in the same direction is {}." .format(int(max_value)))
|
flexible
|
{
"blob_id": "bacaaf5c91232d85f451c2c17a42cd2ec6966684",
"index": 1499,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\n<mask token>\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\n<mask token>\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\n<mask token>\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\n<mask token>\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n",
"step-3": "<mask token>\ndata = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')\nmax_product_hor = 0\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\nmax_product_dia = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\nmax_value = max(max_product_hor, max_product_ver, max_product_dia,\n max_product_dia_2)\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n",
"step-4": "import numpy as np\ndata = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')\nmax_product_hor = 0\nfor i in range(0, len(data[0, :]) - 3):\n for j in range(0, len(data[0, :]) - 3):\n product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,\n i + 3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0]) - 3):\n for j in range(0, len(data[:, 0]) - 3):\n product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +\n 3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\nmax_product_dia = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(0, len(data[0, :]) - 3):\n product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2\n ] * data[i + 3, j + 3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :]) - 3):\n for i in range(2, len(data[0, :]) - 1):\n product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2\n ] * data[i - 3, j + 3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\nmax_value = max(max_product_hor, max_product_ver, max_product_dia,\n max_product_dia_2)\nprint('The greatest product in the same direction is {}.'.format(int(\n max_value)))\n",
"step-5": "# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.\n# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.\n# What is the greatest product of four adjacent numbers in the same direction\n# (up, down, left, right, or diagonally) in the 20×20 grid?\n\nimport numpy as np\ndata = np.genfromtxt(\"problem_11_matrix.txt\", delimiter=\" \")\n\n# find greatest product horizontally\nmax_product_hor = 0\nfor i in range(0, len(data[0, :])-3):\n for j in range(0, len(data[0, :])-3):\n product_hor = data[j, i] * data[j, i+1] * data[j, i+2] * data[j, i+3]\n if product_hor > max_product_hor:\n max_product_hor = product_hor\n# print(\"The greatest product horizontally is {}. \" .format(max_product_hor))\n\n# find greatest product vertically\nmax_product_ver = 0\nfor i in range(0, len(data[:, 0])-3):\n for j in range(0, len(data[:, 0])-3):\n product_ver = data[i, j] * data[i+1, j] * data[i+2, j] * data[i+3, j]\n if product_ver > max_product_ver:\n max_product_ver = product_ver\n# print(\"The greatest product vertically is {}. \" .format(max_product_ver))\n\n# find greatest product diagonally\nmax_product_dia = 0\nfor j in range(0, len(data[0, :])-3):\n for i in range(0, len(data[0, :])-3):\n product_dia = data[i, j] * data[i+1, j+1] * data[i+2, j+2] * data[i+3, j+3]\n if product_dia > max_product_dia:\n max_product_dia = product_dia\n# print(\"The greatest product diagonally is {}. \" .format(max_product_dia))\n\nmax_product_dia_2 = 0\nfor j in range(0, len(data[0, :])-3):\n for i in range(2, len(data[0, :])-1):\n product_dia_2 = data[i, j] * data[i-1, j+1] * data[i-2, j+2] * data[i-3, j+3]\n if product_dia_2 > max_product_dia_2:\n max_product_dia_2 = product_dia_2\n# print(\"The greatest product diagonally is {}. \" .format(max_product_dia_2))\n\nmax_value = max(max_product_hor, max_product_ver, max_product_dia, max_product_dia_2)\n\nprint(\"The greatest product in the same direction is {}.\" .format(int(max_value)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MyNode(at.Node):
separator = '|'
def test_render():
"""Render string cast."""
root = MyNode('root')
s0 = MyNode('sub0', parent=root)
MyNode('sub0B', parent=s0)
MyNode('sub0A', parent=s0)
MyNode('sub1', parent=root)
r = at.RenderTree(root)
expected = '\n'.join(["MyNode('|root')", "├── MyNode('|root|sub0')",
"│ ├── MyNode('|root|sub0|sub0B')",
"│ └── MyNode('|root|sub0|sub0A')", "└── MyNode('|root|sub1')"])
if six.PY2:
eq_(str(r).decode('utf-8'), expected)
else:
eq_(str(r), expected)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyNode(at.Node):
separator = '|'
def test_render():
"""Render string cast."""
root = MyNode('root')
s0 = MyNode('sub0', parent=root)
MyNode('sub0B', parent=s0)
MyNode('sub0A', parent=s0)
MyNode('sub1', parent=root)
r = at.RenderTree(root)
expected = '\n'.join(["MyNode('|root')", "├── MyNode('|root|sub0')",
"│ ├── MyNode('|root|sub0|sub0B')",
"│ └── MyNode('|root|sub0|sub0A')", "└── MyNode('|root|sub1')"])
if six.PY2:
eq_(str(r).decode('utf-8'), expected)
else:
eq_(str(r), expected)
<|reserved_special_token_0|>
def test_glob():
"""Wildcard."""
top = MyNode('top', parent=None)
sub0 = MyNode('sub0', parent=top)
sub0sub0 = MyNode('sub0', parent=sub0)
sub0sub1 = MyNode('sub1', parent=sub0)
sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)
MyNode('sub1', parent=sub0sub1)
sub1 = MyNode('sub1', parent=top)
sub1sub0 = MyNode('sub0', parent=sub1)
r = at.Resolver()
eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])
eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])
eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])
eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])
eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])
with assert_raises(at.ChildResolverError,
"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
r.glob(top, 'sub1|sub1')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyNode(at.Node):
separator = '|'
def test_render():
"""Render string cast."""
root = MyNode('root')
s0 = MyNode('sub0', parent=root)
MyNode('sub0B', parent=s0)
MyNode('sub0A', parent=s0)
MyNode('sub1', parent=root)
r = at.RenderTree(root)
expected = '\n'.join(["MyNode('|root')", "├── MyNode('|root|sub0')",
"│ ├── MyNode('|root|sub0|sub0B')",
"│ └── MyNode('|root|sub0|sub0A')", "└── MyNode('|root|sub1')"])
if six.PY2:
eq_(str(r).decode('utf-8'), expected)
else:
eq_(str(r), expected)
def test_get():
"""Get."""
top = MyNode('top', parent=None)
sub0 = MyNode('sub0', parent=top)
sub0sub0 = MyNode('sub0sub0', parent=sub0)
sub0sub1 = MyNode('sub0sub1', parent=sub0)
sub1 = MyNode('sub1', parent=top)
r = at.Resolver('name')
eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)
eq_(r.get(sub1, '..'), top)
eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)
eq_(r.get(sub1, '.'), sub1)
eq_(r.get(sub1, ''), sub1)
with assert_raises(at.ChildResolverError,
"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'."):
r.get(top, 'sub2')
eq_(r.get(sub0sub0, '|top'), top)
eq_(r.get(sub0sub0, '|top|sub0'), sub0)
with assert_raises(at.ResolverError, "root node missing. root is '|top'."):
r.get(sub0sub0, '|')
with assert_raises(at.ResolverError,
"unknown root node '|bar'. root is '|top'."):
r.get(sub0sub0, '|bar')
def test_glob():
"""Wildcard."""
top = MyNode('top', parent=None)
sub0 = MyNode('sub0', parent=top)
sub0sub0 = MyNode('sub0', parent=sub0)
sub0sub1 = MyNode('sub1', parent=sub0)
sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)
MyNode('sub1', parent=sub0sub1)
sub1 = MyNode('sub1', parent=top)
sub1sub0 = MyNode('sub0', parent=sub1)
r = at.Resolver()
eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])
eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])
eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])
eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])
eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])
with assert_raises(at.ChildResolverError,
"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
r.glob(top, 'sub1|sub1')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import six
from helper import assert_raises, eq_
import anytree as at
class MyNode(at.Node):
separator = '|'
def test_render():
"""Render string cast."""
root = MyNode('root')
s0 = MyNode('sub0', parent=root)
MyNode('sub0B', parent=s0)
MyNode('sub0A', parent=s0)
MyNode('sub1', parent=root)
r = at.RenderTree(root)
expected = '\n'.join(["MyNode('|root')", "├── MyNode('|root|sub0')",
"│ ├── MyNode('|root|sub0|sub0B')",
"│ └── MyNode('|root|sub0|sub0A')", "└── MyNode('|root|sub1')"])
if six.PY2:
eq_(str(r).decode('utf-8'), expected)
else:
eq_(str(r), expected)
def test_get():
"""Get."""
top = MyNode('top', parent=None)
sub0 = MyNode('sub0', parent=top)
sub0sub0 = MyNode('sub0sub0', parent=sub0)
sub0sub1 = MyNode('sub0sub1', parent=sub0)
sub1 = MyNode('sub1', parent=top)
r = at.Resolver('name')
eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)
eq_(r.get(sub1, '..'), top)
eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)
eq_(r.get(sub1, '.'), sub1)
eq_(r.get(sub1, ''), sub1)
with assert_raises(at.ChildResolverError,
"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'."):
r.get(top, 'sub2')
eq_(r.get(sub0sub0, '|top'), top)
eq_(r.get(sub0sub0, '|top|sub0'), sub0)
with assert_raises(at.ResolverError, "root node missing. root is '|top'."):
r.get(sub0sub0, '|')
with assert_raises(at.ResolverError,
"unknown root node '|bar'. root is '|top'."):
r.get(sub0sub0, '|bar')
def test_glob():
"""Wildcard."""
top = MyNode('top', parent=None)
sub0 = MyNode('sub0', parent=top)
sub0sub0 = MyNode('sub0', parent=sub0)
sub0sub1 = MyNode('sub1', parent=sub0)
sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)
MyNode('sub1', parent=sub0sub1)
sub1 = MyNode('sub1', parent=top)
sub1sub0 = MyNode('sub0', parent=sub1)
r = at.Resolver()
eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])
eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])
eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])
eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])
eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])
with assert_raises(at.ChildResolverError,
"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
r.glob(top, 'sub1|sub1')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Test custom node separator."""
import six
from helper import assert_raises, eq_
import anytree as at
class MyNode(at.Node):
separator = "|"
def test_render():
"""Render string cast."""
root = MyNode("root")
s0 = MyNode("sub0", parent=root)
MyNode("sub0B", parent=s0)
MyNode("sub0A", parent=s0)
MyNode("sub1", parent=root)
r = at.RenderTree(root)
expected = "\n".join(
[
"MyNode('|root')",
"├── MyNode('|root|sub0')",
"│ ├── MyNode('|root|sub0|sub0B')",
"│ └── MyNode('|root|sub0|sub0A')",
"└── MyNode('|root|sub1')",
]
)
if six.PY2:
eq_(str(r).decode("utf-8"), expected)
else:
eq_(str(r), expected)
def test_get():
"""Get."""
top = MyNode("top", parent=None)
sub0 = MyNode("sub0", parent=top)
sub0sub0 = MyNode("sub0sub0", parent=sub0)
sub0sub1 = MyNode("sub0sub1", parent=sub0)
sub1 = MyNode("sub1", parent=top)
r = at.Resolver("name")
eq_(r.get(top, "sub0|sub0sub0"), sub0sub0)
eq_(r.get(sub1, ".."), top)
eq_(r.get(sub1, "..|sub0|sub0sub1"), sub0sub1)
eq_(r.get(sub1, "."), sub1)
eq_(r.get(sub1, ""), sub1)
with assert_raises(at.ChildResolverError, "MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'."):
r.get(top, "sub2")
eq_(r.get(sub0sub0, "|top"), top)
eq_(r.get(sub0sub0, "|top|sub0"), sub0)
with assert_raises(at.ResolverError, "root node missing. root is '|top'."):
r.get(sub0sub0, "|")
with assert_raises(at.ResolverError, "unknown root node '|bar'. root is '|top'."):
r.get(sub0sub0, "|bar")
def test_glob():
"""Wildcard."""
top = MyNode("top", parent=None)
sub0 = MyNode("sub0", parent=top)
sub0sub0 = MyNode("sub0", parent=sub0)
sub0sub1 = MyNode("sub1", parent=sub0)
sub0sub1sub0 = MyNode("sub0", parent=sub0sub1)
MyNode("sub1", parent=sub0sub1)
sub1 = MyNode("sub1", parent=top)
sub1sub0 = MyNode("sub0", parent=sub1)
r = at.Resolver()
eq_(r.glob(top, "*|*|sub0"), [sub0sub1sub0])
eq_(r.glob(top, "sub0|sub?"), [sub0sub0, sub0sub1])
eq_(r.glob(sub1, "..|.|*"), [sub0, sub1])
eq_(r.glob(top, "*|*"), [sub0sub0, sub0sub1, sub1sub0])
eq_(r.glob(top, "*|sub0"), [sub0sub0, sub1sub0])
with assert_raises(at.ChildResolverError, "MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
r.glob(top, "sub1|sub1")
|
flexible
|
{
"blob_id": "a430b4629ee06dbfb267f839599383624e37451e",
"index": 4582,
"step-1": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\n<mask token>\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-3": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0sub0', parent=sub0)\n sub0sub1 = MyNode('sub0sub1', parent=sub0)\n sub1 = MyNode('sub1', parent=top)\n r = at.Resolver('name')\n eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)\n eq_(r.get(sub1, '..'), top)\n eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)\n eq_(r.get(sub1, '.'), sub1)\n eq_(r.get(sub1, ''), sub1)\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, 'sub2')\n eq_(r.get(sub0sub0, '|top'), top)\n eq_(r.get(sub0sub0, '|top|sub0'), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, '|')\n with assert_raises(at.ResolverError,\n \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, '|bar')\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-4": "<mask token>\nimport six\nfrom helper import assert_raises, eq_\nimport anytree as at\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0sub0', parent=sub0)\n sub0sub1 = MyNode('sub0sub1', parent=sub0)\n sub1 = MyNode('sub1', parent=top)\n r = at.Resolver('name')\n eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)\n eq_(r.get(sub1, '..'), top)\n eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)\n eq_(r.get(sub1, '.'), sub1)\n eq_(r.get(sub1, ''), sub1)\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, 'sub2')\n eq_(r.get(sub0sub0, '|top'), top)\n eq_(r.get(sub0sub0, '|top|sub0'), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, '|')\n with assert_raises(at.ResolverError,\n \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, '|bar')\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Test custom node separator.\"\"\"\n\nimport six\nfrom helper import assert_raises, eq_\n\nimport anytree as at\n\n\nclass MyNode(at.Node):\n\n separator = \"|\"\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode(\"root\")\n s0 = MyNode(\"sub0\", parent=root)\n MyNode(\"sub0B\", parent=s0)\n MyNode(\"sub0A\", parent=s0)\n MyNode(\"sub1\", parent=root)\n r = at.RenderTree(root)\n\n expected = \"\\n\".join(\n [\n \"MyNode('|root')\",\n \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\",\n \"└── MyNode('|root|sub1')\",\n ]\n )\n if six.PY2:\n eq_(str(r).decode(\"utf-8\"), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode(\"top\", parent=None)\n sub0 = MyNode(\"sub0\", parent=top)\n sub0sub0 = MyNode(\"sub0sub0\", parent=sub0)\n sub0sub1 = MyNode(\"sub0sub1\", parent=sub0)\n sub1 = MyNode(\"sub1\", parent=top)\n r = at.Resolver(\"name\")\n eq_(r.get(top, \"sub0|sub0sub0\"), sub0sub0)\n eq_(r.get(sub1, \"..\"), top)\n eq_(r.get(sub1, \"..|sub0|sub0sub1\"), sub0sub1)\n eq_(r.get(sub1, \".\"), sub1)\n eq_(r.get(sub1, \"\"), sub1)\n with assert_raises(at.ChildResolverError, \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, \"sub2\")\n eq_(r.get(sub0sub0, \"|top\"), top)\n eq_(r.get(sub0sub0, \"|top|sub0\"), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, \"|\")\n with assert_raises(at.ResolverError, \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, \"|bar\")\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode(\"top\", parent=None)\n sub0 = MyNode(\"sub0\", parent=top)\n sub0sub0 = MyNode(\"sub0\", parent=sub0)\n sub0sub1 = MyNode(\"sub1\", parent=sub0)\n sub0sub1sub0 = MyNode(\"sub0\", parent=sub0sub1)\n MyNode(\"sub1\", parent=sub0sub1)\n sub1 = MyNode(\"sub1\", parent=top)\n sub1sub0 = MyNode(\"sub0\", parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, \"*|*|sub0\"), [sub0sub1sub0])\n\n eq_(r.glob(top, \"sub0|sub?\"), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, \"..|.|*\"), [sub0, sub1])\n eq_(r.glob(top, \"*|*\"), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, \"*|sub0\"), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError, \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, \"sub1|sub1\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class mysql(MakePackage):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class mysql(MakePackage):
dependencies = ['cmake']
fetch = (
'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'
)
config = (
'cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'
)
<|reserved_special_token_1|>
from package import *
class mysql(MakePackage):
dependencies = ['cmake']
fetch = (
'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'
)
config = (
'cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'
)
<|reserved_special_token_1|>
from package import *
class mysql(MakePackage):
dependencies = ["cmake"]
fetch="http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/"
config='cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'
|
flexible
|
{
"blob_id": "ec90c731a0e546d9d399cbb68c92be1acca8cbe0",
"index": 518,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass mysql(MakePackage):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass mysql(MakePackage):\n dependencies = ['cmake']\n fetch = (\n 'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'\n )\n config = (\n 'cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n )\n",
"step-4": "from package import *\n\n\nclass mysql(MakePackage):\n dependencies = ['cmake']\n fetch = (\n 'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'\n )\n config = (\n 'cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n )\n",
"step-5": "\nfrom package import *\n\nclass mysql(MakePackage):\n dependencies = [\"cmake\"]\n fetch=\"http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/\"\n config='cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#! /usr/bin/env python3
import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os
print ('Content-type: text/html\n')
form = cgi.FieldStorage()
#database connection
user = "i494f18_team34"
db_pass = "my+sql=i494f18_team34"
db_con = MySQLdb.connect(host="db.soic.indiana.edu", port = 3306, user=user, passwd=db_pass, db=user)
cursor = db_con.cursor()
receiverID = form.getfirst('user','')
userName = ""
userID = ""
if "echooUser" in str(os.environ):
userName = EchooFunctions.getUserName()
userName = userName[0]
userID = EchooFunctions.getUserID(cursor, userName)
admin = False
#change the status of the variable
if userName != "":
if EchooFunctions.checkUserType(cursor, userName) == "administrator":
admin = True
#main contents to insert
friend = ""
friendList = ""
chatroom = ""
userList = []
if userID != "" and receiverID !="":
try:
SQL = "select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = "
SQL+= "m.sender and m.receiver = "+str(userID)+" and m.sender = "+str(receiverID)
SQL+= " Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = "
SQL+= "m.sender and m.receiver = "+str(receiverID)+" and m.sender = "+str(userID)
SQL+=" Order By messageID ;"
cursor.execute(SQL)
results = cursor.fetchall()
except Exception as e:
print('<p>Something went wrong with the first SQL!</p>')
print(SQL, "Error:", e)
else:
if results:
count = 5
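            # emit a date header every five messages; long words are re-wrapped every 20 characters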
for row in results:
word_count = 0
specialChar=row[3]
specialChar2 = ""
specialChar=EchooFunctions.returnSpecialChara(specialChar)
for x in specialChar:
if word_count<=20:
specialChar2 += x
word_count+=1
else:
specialChar2 += x +"<p>"
word_count = 0
if count >= 5:
chatroom+='<li class="chatDate">'+str(row[4])+'</li>'
count=0
if str(row[0]) ==str(userID):
count+=1
chatroom+='<li class="mainUser">'+'<a href="userProfile.cgi?user='+str(row[0])+'">'+row[1]+'</a><img src="images/user/'+row[2]+'" alt="club1">'
chatroom+='<br><div class="messageLine">'+specialChar2+'</div></li>'
else:
count+=1
chatroom+='<li class="otherUser"><img src="images/user/'+row[2]+'" alt="club1">'
chatroom+='<a href="userProfile.cgi?userid='+str(row[0])+'">'+row[1]+'</a><br><div class="messageLine">'+specialChar2+'</div></li>'
if userID == "" or receiverID =="":
content ="""<p>You don't have right access to this page</p>
<a href='index.cgi'></a>"""
print(content)
print(chatroom)
|
normal
|
{
"blob_id": "dc88686d3cbb4223b4de6847bf4fc29b93054b00",
"index": 495,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Content-type: text/html\\n')\n<mask token>\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\n<mask token>\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\n<mask token>\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-3": "<mask token>\nprint('Content-type: text/html\\n')\nform = cgi.FieldStorage()\nuser = 'i494f18_team34'\ndb_pass = 'my+sql=i494f18_team34'\ndb_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,\n passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user', '')\nuserName = ''\nuserID = ''\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\nadmin = False\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\nfriend = ''\nfriendList = ''\nchatroom = ''\nuserList = []\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-4": "import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os\nprint('Content-type: text/html\\n')\nform = cgi.FieldStorage()\nuser = 'i494f18_team34'\ndb_pass = 'my+sql=i494f18_team34'\ndb_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,\n passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user', '')\nuserName = ''\nuserID = ''\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\nadmin = False\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\nfriend = ''\nfriendList = ''\nchatroom = ''\nuserList = []\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-5": "#! /usr/bin/env python3\n\nimport EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os\nprint ('Content-type: text/html\\n')\n\nform = cgi.FieldStorage()\n\n#database connection\nuser = \"i494f18_team34\"\ndb_pass = \"my+sql=i494f18_team34\"\ndb_con = MySQLdb.connect(host=\"db.soic.indiana.edu\", port = 3306, user=user, passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user','')\nuserName = \"\"\nuserID = \"\"\nif \"echooUser\" in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\n\nadmin = False\n#change the status of veriable\nif userName != \"\":\n if EchooFunctions.checkUserType(cursor, userName) == \"administrator\":\n admin = True\n#main contents to insert\nfriend = \"\"\nfriendList = \"\"\nchatroom = \"\"\nuserList = []\nif userID != \"\" and receiverID !=\"\":\n try:\n SQL = \"select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = \"\n SQL+= \"m.sender and m.receiver = \"+str(userID)+\" and m.sender = \"+str(receiverID)\n SQL+= \" Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = \"\n SQL+= \"m.sender and m.receiver = \"+str(receiverID)+\" and m.sender = \"+str(userID)\n SQL+=\" Order By messageID ;\"\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, \"Error:\", e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar=row[3]\n specialChar2 = \"\"\n specialChar=EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count<=20:\n specialChar2 += x\n word_count+=1\n else:\n specialChar2 += x +\"<p>\"\n word_count = 0\n if count >= 5:\n chatroom+='<li class=\"chatDate\">'+str(row[4])+'</li>'\n count=0\n if str(row[0]) ==str(userID):\n count+=1\n chatroom+='<li class=\"mainUser\">'+'<a href=\"userProfile.cgi?user='+str(row[0])+'\">'+row[1]+'</a><img src=\"images/user/'+row[2]+'\" alt=\"club1\">'\n chatroom+='<br><div class=\"messageLine\">'+specialChar2+'</div></li>'\n else:\n count+=1\n chatroom+='<li class=\"otherUser\"><img src=\"images/user/'+row[2]+'\" alt=\"club1\">'\n chatroom+='<a href=\"userProfile.cgi?userid='+str(row[0])+'\">'+row[1]+'</a><br><div class=\"messageLine\">'+specialChar2+'</div></li>'\n\nif userID == \"\" or receiverID ==\"\":\n content =\"\"\"<p>You don't have right access to this page</p>\n<a href='index.cgi'></a>\"\"\"\n print(content)\nprint(chatroom)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from odoo import models, fields, api, _
class SaleAdvancePaymentInv(models.TransientModel):
_inherit = "sale.advance.payment.inv"
date_start_invoice_timesheet = fields.Date(
string='Start Date',
help="Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. "
"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will "
"be invoiced without distinction.", required=True)
date_end_invoice_timesheet = fields.Date(
string='End Date',
help="Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. "
"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will "
"be invoiced without distinction.", required=True)
|
normal
|
{
"blob_id": "75b1674066958a8fa28e74121a35d688bcc473d9",
"index": 9743,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n _inherit = 'sale.advance.payment.inv'\n date_start_invoice_timesheet = fields.Date(string='Start Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n date_end_invoice_timesheet = fields.Date(string='End Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n",
"step-4": "from odoo import models, fields, api, _\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n _inherit = 'sale.advance.payment.inv'\n date_start_invoice_timesheet = fields.Date(string='Start Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n date_end_invoice_timesheet = fields.Date(string='End Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n",
"step-5": "from odoo import models, fields, api, _\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n _inherit = \"sale.advance.payment.inv\"\n\n date_start_invoice_timesheet = fields.Date(\n string='Start Date',\n help=\"Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. \"\n \"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will \"\n \"be invoiced without distinction.\", required=True)\n date_end_invoice_timesheet = fields.Date(\n string='End Date',\n help=\"Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. \"\n \"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will \"\n \"be invoiced without distinction.\", required=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#! /usr/bin/env python3
import os
import requests
# import json
external_ip = "xx"
data_path = "/data/feedback/"
url = "http://{}/feedback/".format(external_ip)
def read():
# read file
file_list = os.listdir(data_path)
result_list = []
for file in file_list:
with open(data_path + file) as f:
# read line, title, name, date, feedback
content = f.readlines()
# envolope to dictionary
dict = {}
dict["title"] = content[0]
dict["name"] = content[1]
dict["date"] = content[2]
dict["feedback"] = content[3]
result_list.append(dict)
f.close()
return result_list
def send(list):
for dict in list:
response = requests.post(url, json=dict)
if(response.status_code == 200):
forDEBUG("SEND_SUCC", dict["title"])
else:
forDEBUG("SEND_FAIL", dict["title"])
def forDEBUG(p1, p2):
print("DEBUG:: {}, {}".format(p1, p2))
def action():
plist = read()
send(plist)
action()
|
normal
|
{
"blob_id": "6f1bb9fde9ed9667ab81baa9e8ec965d711a0556",
"index": 9853,
"step-1": "<mask token>\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\naction()\n",
"step-3": "<mask token>\nexternal_ip = 'xx'\ndata_path = '/data/feedback/'\nurl = 'http://{}/feedback/'.format(external_ip)\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\naction()\n",
"step-4": "import os\nimport requests\nexternal_ip = 'xx'\ndata_path = '/data/feedback/'\nurl = 'http://{}/feedback/'.format(external_ip)\n\n\ndef read():\n file_list = os.listdir(data_path)\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n content = f.readlines()\n dict = {}\n dict['title'] = content[0]\n dict['name'] = content[1]\n dict['date'] = content[2]\n dict['feedback'] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n\ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if response.status_code == 200:\n forDEBUG('SEND_SUCC', dict['title'])\n else:\n forDEBUG('SEND_FAIL', dict['title'])\n\n\ndef forDEBUG(p1, p2):\n print('DEBUG:: {}, {}'.format(p1, p2))\n\n\ndef action():\n plist = read()\n send(plist)\n\n\naction()\n",
"step-5": "#! /usr/bin/env python3\n\nimport os\nimport requests\n# import json\n\nexternal_ip = \"xx\"\ndata_path = \"/data/feedback/\"\nurl = \"http://{}/feedback/\".format(external_ip)\n\ndef read():\n # read file\n file_list = os.listdir(data_path)\n\n result_list = []\n for file in file_list:\n with open(data_path + file) as f:\n # read line, title, name, date, feedback\n content = f.readlines()\n # envolope to dictionary\n dict = {}\n dict[\"title\"] = content[0]\n dict[\"name\"] = content[1]\n dict[\"date\"] = content[2]\n dict[\"feedback\"] = content[3]\n result_list.append(dict)\n f.close()\n return result_list\n\n \ndef send(list):\n for dict in list:\n response = requests.post(url, json=dict)\n if(response.status_code == 200):\n forDEBUG(\"SEND_SUCC\", dict[\"title\"])\n else:\n forDEBUG(\"SEND_FAIL\", dict[\"title\"])\n\ndef forDEBUG(p1, p2):\n print(\"DEBUG:: {}, {}\".format(p1, p2))\n\ndef action():\n plist = read()\n send(plist)\n\naction()",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from typing import Any, Callable, Generator, List, Optional
import pytest
from _pytest import nodes
from _pytest.config import hookimpl
from _pytest.python import Function, PyCollector # type: ignore
from hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports
from .._hypothesis import create_test
from ..exceptions import InvalidSchema
from ..models import Endpoint
from ..utils import is_schemathesis_test
@hookimpl(hookwrapper=True) # type:ignore # pragma: no mutate
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional["SchemathesisCase"]:
"""Switch to a different collector if the test is parametrized marked by schemathesis."""
outcome = yield
if is_schemathesis_test(obj):
outcome.force_result(SchemathesisCase(obj, name, collector))
else:
outcome.get_result()
class SchemathesisCase(PyCollector):
def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:
self.test_function = test_function
self.schemathesis_case = test_function._schemathesis_test # type: ignore
super().__init__(*args, **kwargs)
def _get_test_name(self, endpoint: Endpoint) -> str:
return f"{self.name}[{endpoint.method}:{endpoint.path}]"
def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda: pytest.fail("Invalid schema for endpoint")
items = self.ihook.pytest_pycollect_makeitem(
collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item
)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) -> List[Function]: # type: ignore
"""Generate different test items for all endpoints available in the given schema."""
try:
return [
item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)
]
except Exception:
pytest.fail("Error during collection")
@hookimpl(hookwrapper=True) # pragma: no mutate
def pytest_pyfunc_call(pyfuncitem): # type:ignore
"""It is possible to have a Hypothesis exception in runtime.
For example - kwargs validation is failed for some strategy.
"""
outcome = yield
try:
outcome.get_result()
except InvalidArgument as exc:
pytest.fail(exc.args[0])
|
normal
|
{
"blob_id": "2060f0af351c1487f8aa45943dbaa050f4291c58",
"index": 7791,
"step-1": "<mask token>\n\n\nclass SchemathesisCase(PyCollector):\n <mask token>\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-4": "from typing import Any, Callable, Generator, List, Optional\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import hookimpl\nfrom _pytest.python import Function, PyCollector\nfrom hypothesis.errors import InvalidArgument\nfrom .._hypothesis import create_test\nfrom ..exceptions import InvalidSchema\nfrom ..models import Endpoint\nfrom ..utils import is_schemathesis_test\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-5": "from typing import Any, Callable, Generator, List, Optional\n\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import hookimpl\nfrom _pytest.python import Function, PyCollector # type: ignore\nfrom hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports\n\nfrom .._hypothesis import create_test\nfrom ..exceptions import InvalidSchema\nfrom ..models import Endpoint\nfrom ..utils import is_schemathesis_test\n\n\n@hookimpl(hookwrapper=True) # type:ignore # pragma: no mutate\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional[\"SchemathesisCase\"]:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test # type: ignore\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) -> str:\n return f\"{self.name}[{endpoint.method}:{endpoint.path}]\"\n\n def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda: pytest.fail(\"Invalid schema for endpoint\")\n items = self.ihook.pytest_pycollect_makeitem(\n collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item\n )\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) -> List[Function]: # type: ignore\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [\n item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)\n ]\n except Exception:\n pytest.fail(\"Error during collection\")\n\n\n@hookimpl(hookwrapper=True) # pragma: no mutate\ndef pytest_pyfunc_call(pyfuncitem): # type:ignore\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# count how many of the 12 input lines contain the letter 'r'
c=0
for i in range(12):
if 'r' in input():
c+=1
# print the count
print(c)
|
normal
|
{
"blob_id": "294b0dc7587ecd37887591da5a1afe96a4349f6b",
"index": 8711,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n if 'r' in input():\n c += 1\nprint(c)\n",
"step-3": "c = 0\nfor i in range(12):\n if 'r' in input():\n c += 1\nprint(c)\n",
"step-4": "# ?????\r\nc=0\r\n\r\nfor i in range(12):\r\n if 'r' in input():\r\n c+=1\r\n\r\n# ??\r\nprint(c)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
import sys
from os import listdir, makedirs, unlink
from os.path import isdir, join, isfile, exists
from shutil import copy
import random
def clearDirectory( path ):  # delete all the files inside the given dataset directory
for the_file in listdir(path):
file_path = join(path, the_file)
try:
if isfile(file_path):
unlink(file_path)
except Exception, e:
print e
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'argument list:', str(sys.argv)
if len(sys.argv) < 3:
print 'Arguments is not enough! You need use the dataset and test category.'
sys.exit();
datasetPath = sys.argv[1]
category = sys.argv[2]
if len(sys.argv) > 3:
trainingNum = (int)(sys.argv[3])
else:
trainingNum = 0
if len(sys.argv) > 4:
testingNum = (int)(sys.argv[4])
else:
testingNum = 0
print 'dataset is ', datasetPath, ' and category is ', category
categories = [f for f in listdir(datasetPath) if isdir(join(datasetPath, f))]
if category not in categories:
print 'category is not in the dataset please check that'
sys.exit();
print 'start generating training and testing file...'
categoryPath = datasetPath + '/' + category
categoryFiles = [f for f in listdir(categoryPath) if isfile(join(categoryPath,f))]
print category, 'contains ', len(categoryFiles) , 'file'
otherCategories = [x for x in categories if x != category]
otherCategoriesFiles = [y + '/' + x for y in otherCategories for x in listdir(datasetPath + '/' + y)]
defaultNum = (int)(len(categoryFiles))
if trainingNum <= 0:
trainingNum = defaultNum
elif trainingNum > defaultNum:
trainingNum = defaultNum
if testingNum <= 0:
	testingNum = min(defaultNum / 2, len(categoryFiles) - trainingNum)
elif testingNum > min(defaultNum / 2, len(categoryFiles) - trainingNum):
	testingNum = min(defaultNum / 2, len(categoryFiles) - trainingNum)
print 'trainingNum is', trainingNum
print 'testingNum is', testingNum
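# randomly pick trainingNum positive files; the remaining files are candidates for the test sample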
rand_smpl = [ categoryFiles[i] for i in sorted(random.sample(xrange(len(categoryFiles)), trainingNum)) ]
test_files = [x for x in categoryFiles if x not in rand_smpl]
test_smpl = [test_files[i] for i in random.sample(xrange(len(test_files)), testingNum)]
trainingDir = 'dataset/training'
testingDir = 'dataset/testing'
if not exists(trainingDir):
makedirs(trainingDir)
if not exists(testingDir):
makedirs(testingDir)
clearDirectory(trainingDir)
clearDirectory(testingDir)
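# training.txt lists each copied image followed by its label: 1 for the target category, 0 otherwise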
text_file = open("training.txt", "w")
trainingIndex = 1
for jpgfile in rand_smpl:
filepath = categoryPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/training/' + outputFilePath + ' 1\n')
copy(filepath, trainingDir + '/' + outputFilePath)
trainingIndex += 1
training_smpl = [ otherCategoriesFiles[i] for i in random.sample(xrange(len(otherCategoriesFiles)), trainingNum)]
for jpgfile in training_smpl:
filepath = datasetPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/training/' + outputFilePath + ' 0\n')
copy(filepath, trainingDir + '/' + outputFilePath)
trainingIndex += 1
text_file.close()
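# repeat for the test split: positives from the category first, then negatives from the other categories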
text_file = open("testing.txt", "w")
trainingIndex = 1
for jpgfile in test_smpl:
filepath = categoryPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/testing/' + outputFilePath + ' 1\n')
copy(filepath, testingDir + '/' + outputFilePath)
trainingIndex += 1
testing_smpl = [ otherCategoriesFiles[i] for i in random.sample(xrange(len(otherCategoriesFiles)), testingNum)]
for jpgfile in testing_smpl:
filepath = datasetPath + '/' + jpgfile
outputFilePath = 'image_' + str(trainingIndex) + '.jpg'
text_file.write('dataset/testing/' + outputFilePath + ' 0\n')
copy(filepath, testingDir + '/' + outputFilePath)
trainingIndex += 1
text_file.close()
|
normal
|
{
"blob_id": "a714ac227d5185d7b4a932695ba6698e18d96341",
"index": 629,
"step-1": "# -*- coding: utf-8 -*-\nimport sys\nfrom os import listdir, makedirs, unlink\nfrom os.path import isdir, join, isfile, exists\nfrom shutil import copy\nimport random\n\ndef clearDirectory( path ):#将dataset里面的文件都删除\n\tfor the_file in listdir(path):\n\t file_path = join(path, the_file)\n\t try:\n\t if isfile(file_path):\n\t unlink(file_path)\n\t except Exception, e:\n\t print e\n\nprint 'Number of arguments:', len(sys.argv), 'arguments.'\nprint 'argument list:', str(sys.argv)\n\nif len(sys.argv) < 3:\n\tprint 'Arguments is not enough! You need use the dataset and test category.'\n\tsys.exit();\n\ndatasetPath = sys.argv[1]\ncategory = sys.argv[2]\nif len(sys.argv) > 3:\n\ttrainingNum = (int)(sys.argv[3])\nelse:\n\ttrainingNum = 0\n\nif len(sys.argv) > 4:\n\ttestingNum = (int)(sys.argv[4])\nelse:\n\ttestingNum = 0\nprint 'dataset is ', datasetPath, ' and category is ', category\n\ncategories = [f for f in listdir(datasetPath) if isdir(join(datasetPath, f))]\nif category not in categories:\n\tprint 'category is not in the dataset please check that' \n\tsys.exit();\n\nprint 'start generating training and testing file...' \n\ncategoryPath = datasetPath + '/' + category\ncategoryFiles = [f for f in listdir(categoryPath) if isfile(join(categoryPath,f))]\nprint category, 'contains ', len(categoryFiles) , 'file'\notherCategories = [x for x in categories if x != category]\notherCategoriesFiles = [y + '/' + x for y in otherCategories for x in listdir(datasetPath + '/' + y)]\n\ndefaultNum = (int)(len(categoryFiles))\nif trainingNum <= 0:\n\ttrainingNum = defaultNum\nelif trainingNum > defaultNum:\n\ttrainingNum = defaultNum\nif testingNum <= 0:\n\ttestingNum = min(defaultNum / 2, len(categoryFiles) - testingNum)\nelif testingNum > min(defaultNum / 2, len(categoryFiles) - testingNum):\n\ttestingNum = min(defaultNum / 2, len(categoryFiles) - testingNum)\nprint 'trainingNum is', trainingNum\nprint 'testingNum is', testingNum\nrand_smpl = [ categoryFiles[i] for i in sorted(random.sample(xrange(len(categoryFiles)), trainingNum)) ]\ntest_files = [x for x in categoryFiles if x not in rand_smpl]\n\ntest_smpl = [test_files[i] for i in random.sample(xrange(len(test_files)), testingNum)]\ntrainingDir = 'dataset/training'\ntestingDir = 'dataset/testing'\n\n\nif not exists(trainingDir):\n makedirs(trainingDir)\nif not exists(testingDir):\n makedirs(testingDir)\n\nclearDirectory(trainingDir)\nclearDirectory(testingDir)\n\n\n\ntext_file = open(\"training.txt\", \"w\")\ntrainingIndex = 1\nfor jpgfile in rand_smpl:\n\tfilepath = categoryPath + '/' + jpgfile\n\toutputFilePath = 'image_' + str(trainingIndex) + '.jpg'\n\ttext_file.write('dataset/training/' + outputFilePath + ' 1\\n')\n\tcopy(filepath, trainingDir + '/' + outputFilePath)\n\ttrainingIndex += 1\ntraining_smpl = [ otherCategoriesFiles[i] for i in random.sample(xrange(len(otherCategoriesFiles)), trainingNum)]\nfor jpgfile in training_smpl:\n\tfilepath = datasetPath + '/' + jpgfile\n\toutputFilePath = 'image_' + str(trainingIndex) + '.jpg'\n\ttext_file.write('dataset/training/' + outputFilePath + ' 0\\n')\n\tcopy(filepath, trainingDir + '/' + outputFilePath)\n\ttrainingIndex += 1\ntext_file.close()\n\ntext_file = open(\"testing.txt\", \"w\")\ntrainingIndex = 1\nfor jpgfile in test_smpl:\n\tfilepath = categoryPath + '/' + jpgfile\n\toutputFilePath = 'image_' + str(trainingIndex) + '.jpg'\n\ttext_file.write('dataset/testing/' + outputFilePath + ' 1\\n')\n\tcopy(filepath, testingDir + '/' + outputFilePath)\n\ttrainingIndex += 1\ntesting_smpl = [ 
otherCategoriesFiles[i] for i in random.sample(xrange(len(otherCategoriesFiles)), testingNum)]\nfor jpgfile in testing_smpl:\n\tfilepath = datasetPath + '/' + jpgfile\n\toutputFilePath = 'image_' + str(trainingIndex) + '.jpg'\n\ttext_file.write('dataset/testing/' + outputFilePath + ' 0\\n')\n\tcopy(filepath, testingDir + '/' + outputFilePath)\n\ttrainingIndex += 1\n\ntext_file.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Digital Forensics Virtual File System (dfVFS).
dfVFS, or Digital Forensics Virtual File System, is a Python module
that provides read-only access to file-system objects from various
storage media types and file formats.
"""
|
flexible
|
{
"blob_id": "f7d3096d669946e13186a893ffc53067e0fd0a0a",
"index": 1065,
"step-1": "<mask token>\n",
"step-2": "# -*- coding: utf-8 -*-\n\"\"\"Digital Forensics Virtual File System (dfVFS).\n\ndfVFS, or Digital Forensics Virtual File System, is a Python module\nthat provides read-only access to file-system objects from various\nstorage media types and file formats.\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from pymongo import MongoClient
class MongoDB():
def __init__(self, host, port, db, table):
self.host = host
self.port = port
self.client = MongoClient(host=self.host, port=self.port)
self.db = self.client[db]
self.table = self.db[table]
    # fetch a single document
def get_one(self, query):
        return self.table.find_one(query, projection={"_id": False})
    # fetch multiple documents
def get_all(self, query):
return self.table.find(query)
    # insert a document
def add(self, kv_dict):
return self.table.insert_one(kv_dict)
    # delete all matching documents
def delete(self, query):
return self.table.delete_many(query)
    # check whether the collection contains a document matching the query; returns it (truthy) if found
def check(self, query):
return self.table.find_one(query)
|
normal
|
{
"blob_id": "b5f88a6d119f2c3ce8fb77cf8c45b6c9252f5128",
"index": 7619,
"step-1": "<mask token>\n\n\nclass MongoDB:\n <mask token>\n\n def get_one(self, query):\n return self.table.find_one(query, property={'_id': False})\n <mask token>\n\n def add(self, kv_dict):\n return self.table.insert_one(kv_dict)\n\n def delete(self, query):\n return self.table.delete_many(query)\n\n def check(self, query):\n return self.table.find_one(query)\n",
"step-2": "<mask token>\n\n\nclass MongoDB:\n\n def __init__(self, host, port, db, table):\n self.host = host\n self.port = port\n self.client = MongoClient(host=self.host, port=self.port)\n self.db = self.client[db]\n self.table = self.db[table]\n\n def get_one(self, query):\n return self.table.find_one(query, property={'_id': False})\n <mask token>\n\n def add(self, kv_dict):\n return self.table.insert_one(kv_dict)\n\n def delete(self, query):\n return self.table.delete_many(query)\n\n def check(self, query):\n return self.table.find_one(query)\n",
"step-3": "<mask token>\n\n\nclass MongoDB:\n\n def __init__(self, host, port, db, table):\n self.host = host\n self.port = port\n self.client = MongoClient(host=self.host, port=self.port)\n self.db = self.client[db]\n self.table = self.db[table]\n\n def get_one(self, query):\n return self.table.find_one(query, property={'_id': False})\n\n def get_all(self, query):\n return self.table.find(query)\n\n def add(self, kv_dict):\n return self.table.insert_one(kv_dict)\n\n def delete(self, query):\n return self.table.delete_many(query)\n\n def check(self, query):\n return self.table.find_one(query)\n",
"step-4": "from pymongo import MongoClient\n\n\nclass MongoDB:\n\n def __init__(self, host, port, db, table):\n self.host = host\n self.port = port\n self.client = MongoClient(host=self.host, port=self.port)\n self.db = self.client[db]\n self.table = self.db[table]\n\n def get_one(self, query):\n return self.table.find_one(query, property={'_id': False})\n\n def get_all(self, query):\n return self.table.find(query)\n\n def add(self, kv_dict):\n return self.table.insert_one(kv_dict)\n\n def delete(self, query):\n return self.table.delete_many(query)\n\n def check(self, query):\n return self.table.find_one(query)\n",
"step-5": "from pymongo import MongoClient\n\nclass MongoDB():\n def __init__(self, host, port, db, table):\n self.host = host\n self.port = port\n self.client = MongoClient(host=self.host, port=self.port)\n self.db = self.client[db]\n self.table = self.db[table]\n\n # 获取一条数据\n def get_one(self, query):\n return self.table.find_one(query, property={\"_id\":False})\n\n # 获取多条数据\n def get_all(self, query):\n return self.table.find(query)\n\n # 添加数据\n def add(self, kv_dict):\n return self.table.insert_one(kv_dict)\n\n # 删除数据\n def delete(self, query):\n return self.table.delete_many(query)\n\n # 查看集合中是否包含满足的数据 如果有返回True\n def check(self, query):\n return self.table.find_one(query)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Assessment problem 22
# Two integers are read from standard input (the first is in the range 1-20, the second in the range 10-30, and the first is always smaller than the second).
# Write a program that prints a list of powers of 2 whose exponents run from the first integer to the second
# (input must not print a prompt string). However, remove the second element and the second-to-last element of the list before printing. The output must be in list form.
start, stop = list(map(int, input().split()))
# sample input: 1 10
i = 0
my = [2 ** i for i in range(start, stop+1)]
my.pop(1)
my.pop(-2)
print(my)
# Assessment problem 23
col, row = list(map(int, input().split()))
matrix = []
for i in range(row):
matrix.append(list(input()))
|
normal
|
{
"blob_id": "1f8040776a55d6fe52b64c714d4003469460e454",
"index": 7186,
"step-1": "# 심사문제 22\n# 표준 입력으로 정수 두 개가 입력됩니다(첫 번째 입력 값의 범위는 1~20, 두 번째 입력 값의 범위는 10~30이며 첫 번째 입력 값은 두 번째 입력 값보다 항상 작습니다).\n# 첫 번째 정수부터 두 번째 정수까지를 지수로 하는 2의 거듭제곱 리스트를 출력하는 프로그램을 만드세요\n# (input에서 안내 문자열은 출력하지 않아야 합니다). 단, 리스트의 두 번째 요소와 뒤에서 두 번째 요소는 삭제한 뒤 출력하세요. 출력 결과는 리스트 형태라야 합니다.\n\nstart, stop = list(map(int, input().split()))\n1 10\ni = 0\nmy = [2 ** i for i in range(start, stop+1)]\nmy.pop(1)\nmy.pop(-2)\nprint(my)\n\n# 심사문제 23\ncol, row = list(map(int, input().split()))\n\nmatrix = []\nfor i in range(row):\n matrix.append(list(input()))\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
pokemon_data = mongo.db.pokemon.find_one()
return render_template('index.html', pokemon_data=pokemon_data)
@app.route('/stats')
def stats():
session = Session(engine)
stats = session.query(pokemon_sql).all()
pokemon_list = []
for pokeman in stats:
pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':
pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,
'Attack': pokeman.attack, 'Defense': pokeman.defense,
'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.
sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.
generation, 'Legendary': pokeman.legendary}
pokemon_list.append(pokeman)
return jsonify(pokemon_list)
session.close()
@app.route('/images')
def images():
pokemon_image_db = mongo.db.pokemon.find()
images = []
for image in pokemon_image_db:
image.pop('_id')
images.append(image)
return jsonify(images)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Base.prepare(engine, reflect=True)
<|reserved_special_token_0|>
@app.route('/')
def index():
pokemon_data = mongo.db.pokemon.find_one()
return render_template('index.html', pokemon_data=pokemon_data)
@app.route('/stats')
def stats():
session = Session(engine)
stats = session.query(pokemon_sql).all()
pokemon_list = []
for pokeman in stats:
pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':
pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,
'Attack': pokeman.attack, 'Defense': pokeman.defense,
'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.
sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.
generation, 'Legendary': pokeman.legendary}
pokemon_list.append(pokeman)
return jsonify(pokemon_list)
session.close()
@app.route('/images')
def images():
pokemon_image_db = mongo.db.pokemon.find()
images = []
for image in pokemon_image_db:
image.pop('_id')
images.append(image)
return jsonify(images)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rds_connection_string = f'{sql_username}:{sql_password}@localhost:5432/Pokemon'
engine = create_engine(f'postgresql://{rds_connection_string}')
Base = automap_base()
Base.prepare(engine, reflect=True)
pokemon_sql = Base.classes.pokemon
app = Flask(__name__)
app.config['MONGO_URI'] = (
f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'
)
mongo = PyMongo(app)
@app.route('/')
def index():
pokemon_data = mongo.db.pokemon.find_one()
return render_template('index.html', pokemon_data=pokemon_data)
@app.route('/stats')
def stats():
session = Session(engine)
stats = session.query(pokemon_sql).all()
pokemon_list = []
for pokeman in stats:
pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':
pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,
'Attack': pokeman.attack, 'Defense': pokeman.defense,
'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.
sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.
generation, 'Legendary': pokeman.legendary}
pokemon_list.append(pokeman)
return jsonify(pokemon_list)
session.close()
@app.route('/images')
def images():
pokemon_image_db = mongo.db.pokemon.find()
images = []
for image in pokemon_image_db:
image.pop('_id')
images.append(image)
return jsonify(images)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify, render_template, redirect
from flask_pymongo import PyMongo
from config import mongo_password, mongo_username, sql_username, sql_password
from bson.json_util import dumps
rds_connection_string = f'{sql_username}:{sql_password}@localhost:5432/Pokemon'
engine = create_engine(f'postgresql://{rds_connection_string}')
Base = automap_base()
Base.prepare(engine, reflect=True)
pokemon_sql = Base.classes.pokemon
app = Flask(__name__)
app.config['MONGO_URI'] = (
f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'
)
mongo = PyMongo(app)
@app.route('/')
def index():
pokemon_data = mongo.db.pokemon.find_one()
return render_template('index.html', pokemon_data=pokemon_data)
@app.route('/stats')
def stats():
session = Session(engine)
stats = session.query(pokemon_sql).all()
pokemon_list = []
for pokeman in stats:
pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':
pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,
'Attack': pokeman.attack, 'Defense': pokeman.defense,
'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.
sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.
generation, 'Legendary': pokeman.legendary}
pokemon_list.append(pokeman)
return jsonify(pokemon_list)
session.close()
@app.route('/images')
def images():
pokemon_image_db = mongo.db.pokemon.find()
images = []
for image in pokemon_image_db:
image.pop('_id')
images.append(image)
return jsonify(images)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify, render_template, redirect
from flask_pymongo import PyMongo
from config import mongo_password, mongo_username, sql_username, sql_password
from bson.json_util import dumps
# Database Setup
rds_connection_string = f"{sql_username}:{sql_password}@localhost:5432/Pokemon"
engine = create_engine(f'postgresql://{rds_connection_string}')
# Reflect existing database
Base = automap_base()
Base.prepare(engine, reflect=True)
# Save reference to the table
pokemon_sql = Base.classes.pokemon
# Flask Setup
app = Flask(__name__)
#Set up MongoDB Database
app.config['MONGO_URI'] = f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'
mongo = PyMongo(app)
@app.route("/")
def index():
#Return the homepage
pokemon_data = mongo.db.pokemon.find_one()
return render_template("index.html", pokemon_data = pokemon_data)
#All Pokemon Stats
@app.route("/stats")
def stats():
session = Session(engine)
stats = session.query(pokemon_sql).all()
pokemon_list =[]
for pokeman in stats:
pokeman = {'Name': pokeman.name,
'Number': pokeman.number,
'Type_1': pokeman.type_1,
'Type_2': pokeman.type_2,
'HP': pokeman.hp,
'Attack': pokeman.attack,
'Defense': pokeman.defense,
'Special_Attack': pokeman.sp_atk,
'Special_Defense': pokeman.sp_def,
'Speed': pokeman.speed,
'Generation': pokeman.generation,
'Legendary': pokeman.legendary}
pokemon_list.append(pokeman)
return jsonify(pokemon_list)
session.close()
#Mongo DB image database
@app.route("/images")
def images():
pokemon_image_db = mongo.db.pokemon.find()
images = []
for image in pokemon_image_db:
image.pop('_id')
images.append(image)
return jsonify(images)
if __name__ == "__main__":
app.run(debug=True)
|
flexible
|
{
"blob_id": "15e1ce95398ff155fe594c3b39936d82d71ab9e2",
"index": 5015,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\[email protected]('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\[email protected]('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\n<mask token>\n",
"step-2": "<mask token>\nBase.prepare(engine, reflect=True)\n<mask token>\n\n\[email protected]('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\[email protected]('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\[email protected]('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\nrds_connection_string = f'{sql_username}:{sql_password}@localhost:5432/Pokemon'\nengine = create_engine(f'postgresql://{rds_connection_string}')\nBase = automap_base()\nBase.prepare(engine, reflect=True)\npokemon_sql = Base.classes.pokemon\napp = Flask(__name__)\napp.config['MONGO_URI'] = (\n f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'\n )\nmongo = PyMongo(app)\n\n\[email protected]('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\[email protected]('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\[email protected]('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nfrom flask import Flask, jsonify, render_template, redirect\nfrom flask_pymongo import PyMongo\nfrom config import mongo_password, mongo_username, sql_username, sql_password\nfrom bson.json_util import dumps\nrds_connection_string = f'{sql_username}:{sql_password}@localhost:5432/Pokemon'\nengine = create_engine(f'postgresql://{rds_connection_string}')\nBase = automap_base()\nBase.prepare(engine, reflect=True)\npokemon_sql = Base.classes.pokemon\napp = Flask(__name__)\napp.config['MONGO_URI'] = (\n f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'\n )\nmongo = PyMongo(app)\n\n\[email protected]('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\[email protected]('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\[email protected]('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nfrom flask import Flask, jsonify, render_template, redirect\nfrom flask_pymongo import PyMongo\nfrom config import mongo_password, mongo_username, sql_username, sql_password\nfrom bson.json_util import dumps\n\n\n# Database Setup\nrds_connection_string = f\"{sql_username}:{sql_password}@localhost:5432/Pokemon\"\nengine = create_engine(f'postgresql://{rds_connection_string}')\n\n# Reflect existing database\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\npokemon_sql = Base.classes.pokemon\n\n# Flask Setup\napp = Flask(__name__)\n\n#Set up MongoDB Database\napp.config['MONGO_URI'] = f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'\nmongo = PyMongo(app)\n\n\[email protected](\"/\")\ndef index():\n #Return the homepage\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template(\"index.html\", pokemon_data = pokemon_data)\n\n\n#All Pokemon Stats\[email protected](\"/stats\")\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list =[]\n for pokeman in stats:\n pokeman = {'Name': pokeman.name,\n 'Number': pokeman.number,\n 'Type_1': pokeman.type_1,\n 'Type_2': pokeman.type_2,\n 'HP': pokeman.hp,\n 'Attack': pokeman.attack,\n 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk,\n 'Special_Defense': pokeman.sp_def,\n 'Speed': pokeman.speed,\n 'Generation': pokeman.generation,\n 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n#Mongo DB image database\[email protected](\"/images\")\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from ctypes import *
import os
import sys
import time
import datetime
import subprocess
import RPi.GPIO as GPIO
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
#import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
import ST7735 as TFT
import pigpio
# use BCM pin define
pin_meas = 24 # 18 in BOARD
pin_black = 25 # 22 in BOARD
pin_led = 26 # 37 in BOARD
HOME_DIR = "/home/pi/QSS003_python/"
C12880_LIB = HOME_DIR + "Dual_C12880.so"
# use BCM pin define
GATE_LED_PIN1 = 4 # 7 in BOARD
GATE_LED_PIN2 = 22 # 15 in BOARD
PWM_LED_PIN1 = 18 # in pigpio
PWM_LED_PIN2 = 13 # in pigpio
PWM_FREQ = 500
DUTY_MIN = 0
DUTY_MAX = 900000 # original = 1000000
LED_CURR_MIN = 60 #mA
LED_CURR_MAX = 330 #mA
LED_DUTY_CONST = 10000/3
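# duty = (current_mA - LED_CURR_MIN) * LED_DUTY_CONST maps the usable current
# range [60 mA, 330 mA] linearly onto the PWM duty range [0, DUTY_MAX]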
# use BCM pin define
AOPIN = 23 # 16 in BOARD
RSTPIN = 12 # 32 in BOARD
SPI_PORT = 1
SPI_CH = 0
SPI_SPEED = 4000000
COLOR_RED = (255,0,0)
COLOR_GREEN = (0,255,0)
COLOR_BLUE = (0,0,255)
COLOR_WHITE = (255,255,255)
COLOR_BLACK = (0,0,0)
COLOR_YELLOW = (255,255,0)
COLOR_PURPLE = (255,0, 255)
COLOR_CYAN = (0, 255,255)
TFT_SIZE = (128, 128)
LINE1Y = 15
LINE2Y = 30
LINE3Y = 45
LINE4Y = 65
LINE5Y = 80
LINE6Y = 100
SPACE1 = 15
SPACE2 = 20
time.sleep(1)
C12880 = cdll.LoadLibrary(C12880_LIB)
if len(sys.argv) < 6:
error_str = str(sys.argv[0]) + " led1_current led2_current led_stable_time int_time1 int_time2"
print(error_str)
else:
# board initialization
C12880.Setup() # init spectrometer
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pin_meas, GPIO.IN)
GPIO.setup(pin_black, GPIO.IN)
GPIO.setup(pin_led, GPIO.OUT)
GPIO.output(pin_led, GPIO.LOW)
GPIO.setup(GATE_LED_PIN1, GPIO.OUT)
GPIO.setup(GATE_LED_PIN2, GPIO.OUT)
GPIO.output(GATE_LED_PIN1, GPIO.HIGH) #close
GPIO.output(GATE_LED_PIN2, GPIO.HIGH) #close
data1 = (c_uint * 288)() # data to store spectrum data
data2 = (c_uint * 288)()
meas = 1
black = 1
fnameindex = 0
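	# meas/black mirror the active-low buttons (1 = idle, 0 = pressed);
	# fnameindex numbers successive measurement output files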
# Display init
spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz = SPI_SPEED)
disp = TFT.ST7735(dc = AOPIN, rst = RSTPIN, spi = spi, width = 128, height = 128)
disp.begin()
disp.clear()
img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)
draw = ImageDraw.Draw(img)
font = "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf"
fontout = ImageFont.truetype(font,11)
draw.text((0,LINE1Y), " Mode: Measure", font = fontout, fill = COLOR_BLUE)
draw.text((0,LINE2Y), " Bilirubin", font = fontout, fill = COLOR_BLUE)
draw.text((0,LINE4Y), " SiO2", font = fontout, fill = COLOR_BLUE)
disp.display(img)
led1_current = int(sys.argv[1])
led2_current = int(sys.argv[2])
led_stable_time = float(sys.argv[3])
int_time1 = int(sys.argv[4])
int_time2 = int(sys.argv[5])
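	# clamp the requested LED currents to the range the driver supports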
if (led1_current < LED_CURR_MIN):
led1_current = LED_CURR_MIN
elif (led1_current > LED_CURR_MAX):
led1_current = LED_CURR_MAX
if (led2_current < LED_CURR_MIN):
led2_current = LED_CURR_MIN
elif (led2_current > LED_CURR_MAX):
led2_current = LED_CURR_MAX
print("led1_current = "+ str(led1_current))
print("led2_current = "+ str(led2_current))
led1_duty = (led1_current - LED_CURR_MIN)*LED_DUTY_CONST
led2_duty = (led2_current - LED_CURR_MIN)*LED_DUTY_CONST
print("led1_duty = "+ str(led1_duty))
print("led2_duty = "+ str(led2_duty))
pi = pigpio.pi()
while (1):
#wait until black or meas buttom is pressed
while (meas and black):
if GPIO.input(pin_meas) == GPIO.LOW:
meas = 0
print("meas low")
if GPIO.input(pin_black) == GPIO.LOW:
black = 0
print("black low")
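		# a button was pressed: light the measure indicator and drive both LED channels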
GPIO.output(pin_led, GPIO.HIGH)
pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))
pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))
if (led1_duty > 0):
GPIO.output(GATE_LED_PIN1, GPIO.LOW) # open
if (led2_duty > 0):
GPIO.output(GATE_LED_PIN2, GPIO.LOW) # open
time.sleep(led_stable_time)
if (black == 0):
fname = "dual_black.txt"
else:
fname = "dual_desktop_" + str(fnameindex) + ".txt"
fname = HOME_DIR + fname
#C12880.ReadSpectrometer(int_time, data)
C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)
# print the data on tft screen
draw.rectangle((0, LINE3Y, 128, LINE3Y+SPACE2), COLOR_WHITE)
draw.rectangle((0, LINE5Y, 128, LINE5Y+SPACE2), COLOR_WHITE)
draw.rectangle((0, LINE6Y, 128, LINE6Y+SPACE1), COLOR_WHITE)
fontout = ImageFont.truetype(font,16)
draw.text((0,LINE3Y)," 12.1 mg/dL", font = fontout, fill = COLOR_RED)
draw.text((0,LINE5Y)," 66%", font = fontout, fill = COLOR_RED)
fontout = ImageFont.truetype(font,10)
draw.text((0,LINE6Y),str(datetime.datetime.now()), font = fontout, fill = COLOR_BLUE)
disp.display(img)
#out = [str(line) + '\n' for line in data]
fp = open(fname, "w+")
#print(out)
#fp.writelines(out)
for i in range(0,288):
fp.write(str(data1[i]) + ", " + str(data2[i]) + ", \n")
fp.close()
if (meas == 0):
fnameindex = fnameindex + 1
pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)
pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)
GPIO.output(GATE_LED_PIN1, GPIO.HIGH) # close
GPIO.output(GATE_LED_PIN2, GPIO.HIGH) # close
# time.sleep(led_stable_time) # for LED test
meas = 1
black = 1
GPIO.output(pin_led, GPIO.LOW) #turn off measure LED
print("done")
|
normal
|
{
"blob_id": "d250cc0aafdd48cb0eb56108d9c7148153cde002",
"index": 6840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntime.sleep(1)\n<mask token>\nif len(sys.argv) < 6:\n error_str = str(sys.argv[0]\n ) + ' led1_current led2_current led_stable_time int_time1 int_time2'\n print(error_str)\nelse:\n C12880.Setup()\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin_meas, GPIO.IN)\n GPIO.setup(pin_black, GPIO.IN)\n GPIO.setup(pin_led, GPIO.OUT)\n GPIO.output(pin_led, GPIO.LOW)\n GPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n GPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n data1 = (c_uint * 288)()\n data2 = (c_uint * 288)()\n meas = 1\n black = 1\n fnameindex = 0\n spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz=SPI_SPEED)\n disp = TFT.ST7735(dc=AOPIN, rst=RSTPIN, spi=spi, width=128, height=128)\n disp.begin()\n disp.clear()\n img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n draw = ImageDraw.Draw(img)\n font = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'\n fontout = ImageFont.truetype(font, 11)\n draw.text((0, LINE1Y), ' Mode: Measure', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE2Y), ' Bilirubin', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE4Y), ' SiO2', font=fontout, fill=COLOR_BLUE)\n disp.display(img)\n led1_current = int(sys.argv[1])\n led2_current = int(sys.argv[2])\n led_stable_time = float(sys.argv[3])\n int_time1 = int(sys.argv[4])\n int_time2 = int(sys.argv[5])\n if led1_current < LED_CURR_MIN:\n led1_current = LED_CURR_MIN\n elif led1_current > LED_CURR_MAX:\n led1_current = LED_CURR_MAX\n if led2_current < LED_CURR_MIN:\n led2_current = LED_CURR_MIN\n elif led2_current > LED_CURR_MAX:\n led2_current = LED_CURR_MAX\n print('led1_current = ' + str(led1_current))\n print('led2_current = ' + str(led2_current))\n led1_duty = (led1_current - LED_CURR_MIN) * LED_DUTY_CONST\n led2_duty = (led2_current - LED_CURR_MIN) * LED_DUTY_CONST\n print('led1_duty = ' + str(led1_duty))\n print('led2_duty = ' + str(led2_duty))\n pi = pigpio.pi()\n while 1:\n while meas and black:\n if GPIO.input(pin_meas) == GPIO.LOW:\n meas = 0\n print('meas low')\n if GPIO.input(pin_black) == GPIO.LOW:\n black = 0\n print('black low')\n GPIO.output(pin_led, GPIO.HIGH)\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n if led1_duty > 0:\n GPIO.output(GATE_LED_PIN1, GPIO.LOW)\n if led2_duty > 0:\n GPIO.output(GATE_LED_PIN2, GPIO.LOW)\n time.sleep(led_stable_time)\n if black == 0:\n fname = 'dual_black.txt'\n else:\n fname = 'dual_desktop_' + str(fnameindex) + '.txt'\n fname = HOME_DIR + fname\n C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n draw.rectangle((0, LINE3Y, 128, LINE3Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE5Y, 128, LINE5Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE6Y, 128, LINE6Y + SPACE1), COLOR_WHITE)\n fontout = ImageFont.truetype(font, 16)\n draw.text((0, LINE3Y), ' 12.1 mg/dL', font=fontout, fill=COLOR_RED)\n draw.text((0, LINE5Y), ' 66%', font=fontout, fill=COLOR_RED)\n fontout = ImageFont.truetype(font, 10)\n draw.text((0, LINE6Y), str(datetime.datetime.now()), font=fontout,\n fill=COLOR_BLUE)\n disp.display(img)\n fp = open(fname, 'w+')\n for i in range(0, 288):\n fp.write(str(data1[i]) + ', ' + str(data2[i]) + ', \\n')\n fp.close()\n if meas == 0:\n fnameindex = fnameindex + 1\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n meas = 1\n black = 1\n GPIO.output(pin_led, 
GPIO.LOW)\n print('done')\n",
"step-3": "<mask token>\npin_meas = 24\npin_black = 25\npin_led = 26\nHOME_DIR = '/home/pi/QSS003_python/'\nC12880_LIB = HOME_DIR + 'Dual_C12880.so'\nGATE_LED_PIN1 = 4\nGATE_LED_PIN2 = 22\nPWM_LED_PIN1 = 18\nPWM_LED_PIN2 = 13\nPWM_FREQ = 500\nDUTY_MIN = 0\nDUTY_MAX = 900000\nLED_CURR_MIN = 60\nLED_CURR_MAX = 330\nLED_DUTY_CONST = 10000 / 3\nAOPIN = 23\nRSTPIN = 12\nSPI_PORT = 1\nSPI_CH = 0\nSPI_SPEED = 4000000\nCOLOR_RED = 255, 0, 0\nCOLOR_GREEN = 0, 255, 0\nCOLOR_BLUE = 0, 0, 255\nCOLOR_WHITE = 255, 255, 255\nCOLOR_BLACK = 0, 0, 0\nCOLOR_YELLOW = 255, 255, 0\nCOLOR_PURPLE = 255, 0, 255\nCOLOR_CYAN = 0, 255, 255\nTFT_SIZE = 128, 128\nLINE1Y = 15\nLINE2Y = 30\nLINE3Y = 45\nLINE4Y = 65\nLINE5Y = 80\nLINE6Y = 100\nSPACE1 = 15\nSPACE2 = 20\ntime.sleep(1)\nC12880 = cdll.LoadLibrary(C12880_LIB)\nif len(sys.argv) < 6:\n error_str = str(sys.argv[0]\n ) + ' led1_current led2_current led_stable_time int_time1 int_time2'\n print(error_str)\nelse:\n C12880.Setup()\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin_meas, GPIO.IN)\n GPIO.setup(pin_black, GPIO.IN)\n GPIO.setup(pin_led, GPIO.OUT)\n GPIO.output(pin_led, GPIO.LOW)\n GPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n GPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n data1 = (c_uint * 288)()\n data2 = (c_uint * 288)()\n meas = 1\n black = 1\n fnameindex = 0\n spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz=SPI_SPEED)\n disp = TFT.ST7735(dc=AOPIN, rst=RSTPIN, spi=spi, width=128, height=128)\n disp.begin()\n disp.clear()\n img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n draw = ImageDraw.Draw(img)\n font = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'\n fontout = ImageFont.truetype(font, 11)\n draw.text((0, LINE1Y), ' Mode: Measure', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE2Y), ' Bilirubin', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE4Y), ' SiO2', font=fontout, fill=COLOR_BLUE)\n disp.display(img)\n led1_current = int(sys.argv[1])\n led2_current = int(sys.argv[2])\n led_stable_time = float(sys.argv[3])\n int_time1 = int(sys.argv[4])\n int_time2 = int(sys.argv[5])\n if led1_current < LED_CURR_MIN:\n led1_current = LED_CURR_MIN\n elif led1_current > LED_CURR_MAX:\n led1_current = LED_CURR_MAX\n if led2_current < LED_CURR_MIN:\n led2_current = LED_CURR_MIN\n elif led2_current > LED_CURR_MAX:\n led2_current = LED_CURR_MAX\n print('led1_current = ' + str(led1_current))\n print('led2_current = ' + str(led2_current))\n led1_duty = (led1_current - LED_CURR_MIN) * LED_DUTY_CONST\n led2_duty = (led2_current - LED_CURR_MIN) * LED_DUTY_CONST\n print('led1_duty = ' + str(led1_duty))\n print('led2_duty = ' + str(led2_duty))\n pi = pigpio.pi()\n while 1:\n while meas and black:\n if GPIO.input(pin_meas) == GPIO.LOW:\n meas = 0\n print('meas low')\n if GPIO.input(pin_black) == GPIO.LOW:\n black = 0\n print('black low')\n GPIO.output(pin_led, GPIO.HIGH)\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n if led1_duty > 0:\n GPIO.output(GATE_LED_PIN1, GPIO.LOW)\n if led2_duty > 0:\n GPIO.output(GATE_LED_PIN2, GPIO.LOW)\n time.sleep(led_stable_time)\n if black == 0:\n fname = 'dual_black.txt'\n else:\n fname = 'dual_desktop_' + str(fnameindex) + '.txt'\n fname = HOME_DIR + fname\n C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n draw.rectangle((0, LINE3Y, 128, LINE3Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE5Y, 128, LINE5Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE6Y, 128, 
LINE6Y + SPACE1), COLOR_WHITE)\n fontout = ImageFont.truetype(font, 16)\n draw.text((0, LINE3Y), ' 12.1 mg/dL', font=fontout, fill=COLOR_RED)\n draw.text((0, LINE5Y), ' 66%', font=fontout, fill=COLOR_RED)\n fontout = ImageFont.truetype(font, 10)\n draw.text((0, LINE6Y), str(datetime.datetime.now()), font=fontout,\n fill=COLOR_BLUE)\n disp.display(img)\n fp = open(fname, 'w+')\n for i in range(0, 288):\n fp.write(str(data1[i]) + ', ' + str(data2[i]) + ', \\n')\n fp.close()\n if meas == 0:\n fnameindex = fnameindex + 1\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n meas = 1\n black = 1\n GPIO.output(pin_led, GPIO.LOW)\n print('done')\n",
"step-4": "from ctypes import *\nimport os\nimport sys\nimport time\nimport datetime\nimport subprocess\nimport RPi.GPIO as GPIO\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport Adafruit_GPIO.SPI as SPI\nimport ST7735 as TFT\nimport pigpio\npin_meas = 24\npin_black = 25\npin_led = 26\nHOME_DIR = '/home/pi/QSS003_python/'\nC12880_LIB = HOME_DIR + 'Dual_C12880.so'\nGATE_LED_PIN1 = 4\nGATE_LED_PIN2 = 22\nPWM_LED_PIN1 = 18\nPWM_LED_PIN2 = 13\nPWM_FREQ = 500\nDUTY_MIN = 0\nDUTY_MAX = 900000\nLED_CURR_MIN = 60\nLED_CURR_MAX = 330\nLED_DUTY_CONST = 10000 / 3\nAOPIN = 23\nRSTPIN = 12\nSPI_PORT = 1\nSPI_CH = 0\nSPI_SPEED = 4000000\nCOLOR_RED = 255, 0, 0\nCOLOR_GREEN = 0, 255, 0\nCOLOR_BLUE = 0, 0, 255\nCOLOR_WHITE = 255, 255, 255\nCOLOR_BLACK = 0, 0, 0\nCOLOR_YELLOW = 255, 255, 0\nCOLOR_PURPLE = 255, 0, 255\nCOLOR_CYAN = 0, 255, 255\nTFT_SIZE = 128, 128\nLINE1Y = 15\nLINE2Y = 30\nLINE3Y = 45\nLINE4Y = 65\nLINE5Y = 80\nLINE6Y = 100\nSPACE1 = 15\nSPACE2 = 20\ntime.sleep(1)\nC12880 = cdll.LoadLibrary(C12880_LIB)\nif len(sys.argv) < 6:\n error_str = str(sys.argv[0]\n ) + ' led1_current led2_current led_stable_time int_time1 int_time2'\n print(error_str)\nelse:\n C12880.Setup()\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin_meas, GPIO.IN)\n GPIO.setup(pin_black, GPIO.IN)\n GPIO.setup(pin_led, GPIO.OUT)\n GPIO.output(pin_led, GPIO.LOW)\n GPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n GPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n data1 = (c_uint * 288)()\n data2 = (c_uint * 288)()\n meas = 1\n black = 1\n fnameindex = 0\n spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz=SPI_SPEED)\n disp = TFT.ST7735(dc=AOPIN, rst=RSTPIN, spi=spi, width=128, height=128)\n disp.begin()\n disp.clear()\n img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n draw = ImageDraw.Draw(img)\n font = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'\n fontout = ImageFont.truetype(font, 11)\n draw.text((0, LINE1Y), ' Mode: Measure', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE2Y), ' Bilirubin', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE4Y), ' SiO2', font=fontout, fill=COLOR_BLUE)\n disp.display(img)\n led1_current = int(sys.argv[1])\n led2_current = int(sys.argv[2])\n led_stable_time = float(sys.argv[3])\n int_time1 = int(sys.argv[4])\n int_time2 = int(sys.argv[5])\n if led1_current < LED_CURR_MIN:\n led1_current = LED_CURR_MIN\n elif led1_current > LED_CURR_MAX:\n led1_current = LED_CURR_MAX\n if led2_current < LED_CURR_MIN:\n led2_current = LED_CURR_MIN\n elif led2_current > LED_CURR_MAX:\n led2_current = LED_CURR_MAX\n print('led1_current = ' + str(led1_current))\n print('led2_current = ' + str(led2_current))\n led1_duty = (led1_current - LED_CURR_MIN) * LED_DUTY_CONST\n led2_duty = (led2_current - LED_CURR_MIN) * LED_DUTY_CONST\n print('led1_duty = ' + str(led1_duty))\n print('led2_duty = ' + str(led2_duty))\n pi = pigpio.pi()\n while 1:\n while meas and black:\n if GPIO.input(pin_meas) == GPIO.LOW:\n meas = 0\n print('meas low')\n if GPIO.input(pin_black) == GPIO.LOW:\n black = 0\n print('black low')\n GPIO.output(pin_led, GPIO.HIGH)\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n if led1_duty > 0:\n GPIO.output(GATE_LED_PIN1, GPIO.LOW)\n if led2_duty > 0:\n GPIO.output(GATE_LED_PIN2, GPIO.LOW)\n time.sleep(led_stable_time)\n if black == 0:\n fname = 'dual_black.txt'\n else:\n fname = 'dual_desktop_' + str(fnameindex) + '.txt'\n 
fname = HOME_DIR + fname\n C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n draw.rectangle((0, LINE3Y, 128, LINE3Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE5Y, 128, LINE5Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE6Y, 128, LINE6Y + SPACE1), COLOR_WHITE)\n fontout = ImageFont.truetype(font, 16)\n draw.text((0, LINE3Y), ' 12.1 mg/dL', font=fontout, fill=COLOR_RED)\n draw.text((0, LINE5Y), ' 66%', font=fontout, fill=COLOR_RED)\n fontout = ImageFont.truetype(font, 10)\n draw.text((0, LINE6Y), str(datetime.datetime.now()), font=fontout,\n fill=COLOR_BLUE)\n disp.display(img)\n fp = open(fname, 'w+')\n for i in range(0, 288):\n fp.write(str(data1[i]) + ', ' + str(data2[i]) + ', \\n')\n fp.close()\n if meas == 0:\n fnameindex = fnameindex + 1\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n meas = 1\n black = 1\n GPIO.output(pin_led, GPIO.LOW)\n print('done')\n",
"step-5": "from ctypes import *\nimport os\nimport sys\nimport time\nimport datetime\nimport subprocess\nimport RPi.GPIO as GPIO\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n#import Adafruit_GPIO as GPIO\nimport Adafruit_GPIO.SPI as SPI\nimport ST7735 as TFT\nimport pigpio\n\n# use BCM pin define\npin_meas = 24 \t# 18 in BOARD\npin_black = 25\t# 22 in BOARD\npin_led = 26 # 37 in BOARD\n\nHOME_DIR = \"/home/pi/QSS003_python/\"\nC12880_LIB = HOME_DIR + \"Dual_C12880.so\"\n\n# use BCM pin define\nGATE_LED_PIN1 = 4\t# 7 in BOARD\nGATE_LED_PIN2 = 22\t# 15 in BOARD\nPWM_LED_PIN1 = 18 # in pigpio\nPWM_LED_PIN2 = 13 # in pigpio\n\nPWM_FREQ = 500\nDUTY_MIN = 0\nDUTY_MAX = 900000\t# original = 1000000\nLED_CURR_MIN = 60\t#mA\nLED_CURR_MAX = 330\t#mA\nLED_DUTY_CONST = 10000/3\n\n# use BCM pin define\nAOPIN = 23\t# 16 in BOARD\nRSTPIN = 12\t# 32 in BOARD\n\nSPI_PORT = 1\nSPI_CH = 0\nSPI_SPEED = 4000000\n\nCOLOR_RED \t= (255,0,0)\nCOLOR_GREEN = (0,255,0)\nCOLOR_BLUE\t= (0,0,255)\nCOLOR_WHITE\t= (255,255,255)\nCOLOR_BLACK = (0,0,0)\nCOLOR_YELLOW = (255,255,0)\nCOLOR_PURPLE = (255,0, 255)\nCOLOR_CYAN = (0, 255,255)\nTFT_SIZE = (128, 128)\n\nLINE1Y = 15\nLINE2Y = 30\nLINE3Y = 45\nLINE4Y = 65\nLINE5Y = 80\nLINE6Y = 100\n\nSPACE1 = 15\nSPACE2 = 20\n\ntime.sleep(1)\nC12880 = cdll.LoadLibrary(C12880_LIB)\n\nif len(sys.argv) < 6:\n\terror_str = str(sys.argv[0]) + \" led1_current led2_current led_stable_time int_time1 int_time2\"\n\tprint(error_str)\nelse:\n\t# board initialization \n\tC12880.Setup() # init spectrometer\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setwarnings(False)\n\tGPIO.setup(pin_meas, GPIO.IN)\n\tGPIO.setup(pin_black, GPIO.IN)\n\tGPIO.setup(pin_led, GPIO.OUT)\n\tGPIO.output(pin_led, GPIO.LOW)\n\tGPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n\tGPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n\tGPIO.output(GATE_LED_PIN1, GPIO.HIGH)\t#close\n\tGPIO.output(GATE_LED_PIN2, GPIO.HIGH)\t#close\n\n\tdata1 = (c_uint * 288)() # data to store spectrum data\n\tdata2 = (c_uint * 288)()\n\tmeas = 1\n\tblack = 1\n\tfnameindex = 0\n\n\t# Display init\n\tspi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz = SPI_SPEED)\n\tdisp = TFT.ST7735(dc = AOPIN, rst = RSTPIN, spi = spi, width = 128, height = 128)\n\tdisp.begin()\n\tdisp.clear()\n\timg = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n\tdraw = ImageDraw.Draw(img)\n\tfont = \"/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf\"\n\tfontout = ImageFont.truetype(font,11)\n\tdraw.text((0,LINE1Y), \" Mode: Measure\", font = fontout, fill = COLOR_BLUE)\n\tdraw.text((0,LINE2Y), \" Bilirubin\", font = fontout, fill = COLOR_BLUE)\n\tdraw.text((0,LINE4Y), \" SiO2\", font = fontout, fill = COLOR_BLUE)\n\tdisp.display(img)\n\n\tled1_current = int(sys.argv[1])\n\tled2_current = int(sys.argv[2])\n\tled_stable_time = float(sys.argv[3])\n\tint_time1 = int(sys.argv[4])\n\tint_time2 = int(sys.argv[5])\n\n\tif (led1_current < LED_CURR_MIN):\n\t\tled1_current = LED_CURR_MIN\n\telif (led1_current > LED_CURR_MAX):\n\t\tled1_current = LED_CURR_MAX\n\n\tif (led2_current < LED_CURR_MIN):\n\t\tled2_current = LED_CURR_MIN\n\telif (led2_current > LED_CURR_MAX):\n\t\tled2_current = LED_CURR_MAX\n\n\tprint(\"led1_current = \"+ str(led1_current))\n\tprint(\"led2_current = \"+ str(led2_current))\n\n\tled1_duty = (led1_current - LED_CURR_MIN)*LED_DUTY_CONST\n\tled2_duty = (led2_current - LED_CURR_MIN)*LED_DUTY_CONST\n\n\tprint(\"led1_duty = \"+ str(led1_duty))\n\tprint(\"led2_duty = \"+ str(led2_duty))\n\n\tpi = pigpio.pi()\n\n\twhile (1):\n\t\t#wait until black or meas buttom is 
pressed\n\t\twhile (meas and black):\n\t\t\tif GPIO.input(pin_meas) == GPIO.LOW:\n\t\t\t\tmeas = 0\n\t\t\t\tprint(\"meas low\")\n\t\t\tif GPIO.input(pin_black) == GPIO.LOW:\n\t\t\t\tblack = 0\n\t\t\t\tprint(\"black low\")\n\n\t\tGPIO.output(pin_led, GPIO.HIGH)\n\t\tpi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n\t\tpi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n\t\tif (led1_duty > 0):\n\t\t\tGPIO.output(GATE_LED_PIN1, GPIO.LOW)\t# open\n\t\tif (led2_duty > 0):\n\t\t\tGPIO.output(GATE_LED_PIN2, GPIO.LOW)\t# open\n\n\t\ttime.sleep(led_stable_time)\n\n\t\tif (black == 0):\n\t\t\tfname = \"dual_black.txt\"\n\t\telse:\n\t\t\tfname = \"dual_desktop_\" + str(fnameindex) + \".txt\"\n\t\tfname = HOME_DIR + fname\n\n\t\t#C12880.ReadSpectrometer(int_time, data)\n\t\tC12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n\n\t\t# print the data on tft screen \n\t\tdraw.rectangle((0, LINE3Y, 128, LINE3Y+SPACE2), COLOR_WHITE)\n\t\tdraw.rectangle((0, LINE5Y, 128, LINE5Y+SPACE2), COLOR_WHITE)\n\t\tdraw.rectangle((0, LINE6Y, 128, LINE6Y+SPACE1), COLOR_WHITE)\n\t\tfontout = ImageFont.truetype(font,16)\n\t\tdraw.text((0,LINE3Y),\" 12.1 mg/dL\", font = fontout, fill = COLOR_RED)\n\t\tdraw.text((0,LINE5Y),\" 66%\", font = fontout, fill = COLOR_RED)\n\t\tfontout = ImageFont.truetype(font,10)\n\t\tdraw.text((0,LINE6Y),str(datetime.datetime.now()), font = fontout, fill = COLOR_BLUE)\n\t\tdisp.display(img)\n\n\t\t#out = [str(line) + '\\n' for line in data]\n\t\tfp = open(fname, \"w+\")\n\t\t#print(out)\n\t\t#fp.writelines(out)\n\t\tfor i in range(0,288):\n\t\t\tfp.write(str(data1[i]) + \", \" + str(data2[i]) + \", \\n\")\n\t\tfp.close()\n\n\t\tif (meas == 0):\n\t\t\tfnameindex = fnameindex + 1\n\n\t\tpi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n\t\tpi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n\t\tGPIO.output(GATE_LED_PIN1, GPIO.HIGH) # close\n\t\tGPIO.output(GATE_LED_PIN2, GPIO.HIGH) # close\n\n\t\t# time.sleep(led_stable_time)\t# for LED test\n\n\t\tmeas = 1\n\t\tblack = 1\n\n\t\tGPIO.output(pin_led, GPIO.LOW) #turn off measure LED\n\t\tprint(\"done\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any of {'0', '1', '2'}
os.environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import sys
from util import load_model
from keras.preprocessing.text import hashing_trick
from keras.preprocessing.sequence import pad_sequences
from southpark.southpark_generative import string_one_hot, char_one_hot
MODEL_NAME = "script_gen_demo_model"
def main():
print("Loading model...")
model, charset = load_model(MODEL_NAME)
print(charset)
seed_text = input("Enter a String: ").strip()
print()
generate_script(seed_text, model, charset)
def generate_script(seed_text, model, charset):
sys.stdout.write(seed_text)
sys.stdout.flush()
next_char = None
should_stop = False
while not should_stop:
        prev_char = next_char
        next_char = sample(model, seed_text, charset, temp = 0.2)
        seed_text += next_char  # extend the seed so the next prediction sees this character
sys.stdout.write(next_char)
sys.stdout.flush()
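        # stop once the model emits two consecutive newlines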
if prev_char == '\n' and prev_char == next_char:
should_stop = True
def sample(model, string, charset, temp = 1.0):
inputs = [string_one_hot(string, charset)]
inputs = pad_sequences(inputs, padding = 'post', maxlen = 64)
preds = model.predict(inputs)[0]
return charset[sample_preds(preds, temp)]
def sample_preds(results, temperature = 1.0):
# helper function to sample an index from a probability array
if temperature <= 0.0:
return np.argmax(results)
#num_choices = results.shape[0] # (batch, outputs)
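    # temperature < 1 sharpens the distribution toward the argmax;
    # temperature > 1 flattens it toward uniform sampling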
probs = np.exp(np.log(results) / temperature)
probs /= np.sum(probs)
return np.random.choice(len(results), p = probs)
#preds = np.asarray(preds).astype('float64')
#preds = np.log(preds) / temperature
#exp_preds = np.exp(preds)
#preds = exp_preds / np.sum(exp_preds)
#probas = np.random.multinomial(1, preds, 1)
#
#print(probas)
#return np.argmax(probas)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "ed7b29a4d7f3a48884434373418c3528f2f397ac",
"index": 271,
"step-1": "<mask token>\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n<mask token>\nMODEL_NAME = 'script_gen_demo_model'\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KERAS_BACKEND'] = 'tensorflow'\nimport numpy as np\nimport sys\nfrom util import load_model\nfrom keras.preprocessing.text import hashing_trick\nfrom keras.preprocessing.sequence import pad_sequences\nfrom southpark.southpark_generative import string_one_hot, char_one_hot\nMODEL_NAME = 'script_gen_demo_model'\n\n\ndef main():\n print('Loading model...')\n model, charset = load_model(MODEL_NAME)\n print(charset)\n seed_text = input('Enter a String: ').strip()\n print()\n generate_script(seed_text, model, charset)\n\n\ndef generate_script(seed_text, model, charset):\n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp=0.2)\n sys.stdout.write(next_char)\n sys.stdout.flush()\n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n\ndef sample(model, string, charset, temp=1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding='post', maxlen=64)\n preds = model.predict(inputs)[0]\n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature=1.0):\n if temperature <= 0.0:\n return np.argmax(results)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p=probs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n\nimport numpy as np\nimport sys\n\nfrom util import load_model\nfrom keras.preprocessing.text import hashing_trick\nfrom keras.preprocessing.sequence import pad_sequences \n\nfrom southpark.southpark_generative import string_one_hot, char_one_hot\n\n\nMODEL_NAME = \"script_gen_demo_model\"\n\ndef main():\n print(\"Loading model...\") \n model, charset = load_model(MODEL_NAME)\n \n print(charset)\n\n seed_text = input(\"Enter a String: \").strip()\n print()\n generate_script(seed_text, model, charset)\n\ndef generate_script(seed_text, model, charset):\n \n sys.stdout.write(seed_text)\n sys.stdout.flush()\n next_char = None\n should_stop = False\n while not should_stop:\n prev_char = next_char\n next_char = sample(model, seed_text, charset, temp = 0.2)\n \n sys.stdout.write(next_char)\n sys.stdout.flush()\n \n if prev_char == '\\n' and prev_char == next_char:\n should_stop = True\n\n \ndef sample(model, string, charset, temp = 1.0):\n inputs = [string_one_hot(string, charset)]\n inputs = pad_sequences(inputs, padding = 'post', maxlen = 64)\n preds = model.predict(inputs)[0]\n \n return charset[sample_preds(preds, temp)]\n\n\ndef sample_preds(results, temperature = 1.0):\n # helper function to sample an index from a probability array\n\n if temperature <= 0.0:\n return np.argmax(results)\n \n #num_choices = results.shape[0] # (batch, outputs)\n probs = np.exp(np.log(results) / temperature)\n probs /= np.sum(probs)\n return np.random.choice(len(results), p = probs)\n\n\n #preds = np.asarray(preds).astype('float64')\n #preds = np.log(preds) / temperature\n #exp_preds = np.exp(preds)\n #preds = exp_preds / np.sum(exp_preds)\n #probas = np.random.multinomial(1, preds, 1)\n #\n #print(probas)\n\n #return np.argmax(probas)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import _thread
import os
from queue import Queue
from threading import Thread
import random
import io
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from datetime import datetime, timedelta
import time
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pymorphy2
from pymongo import MongoClient
import config
import matplotlib
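# select a non-interactive backend so matplotlib never needs a display on the server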
matplotlib.use('Agg')
print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')
print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')
remove_words = ['год']
DIR = os.path.dirname(__file__)
processing = []
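# "year in review" target: the current year during December, otherwise the previous year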
current_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))
tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
morph = pymorphy2.MorphAnalyzer()
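    # keep only words longer than 2 characters that parse as nouns,
    # lemmatize them and normalize 'ё' to 'е'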
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),
filter(
lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,
tokenizer.tokenize(sentence.replace('\xa0', ' '))
)
)
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))
if not top_words:
return
# def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
# return "hsl(%d, 100%%, %d%%)" % (random.randint(0, 360), random.randint(20, 50))
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
return "rgb(0, 0, 0)"
sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)
wordcloud = WordCloud(
max_words=50,
max_font_size=500,
background_color='white',
margin=5,
width=1000,
height=1000,
stopwords=sw,
prefer_horizontal=0.7,
font_path='font.ttf'
).generate(' '.join(top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
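    # rewind the buffer so the upload call reads the PNG from the start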
img_arr.seek(0)
return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
if user_id in processing:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message=f'Подожди, я составляю твое облако тегов')
return
if message.lower() != 'облако':
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message=f'Если ты хочешь получить свое облако тегов за {current_year} '
'год, отправь мне слово "облако" без кавычек 🙃')
return
processing.append(user_id)
print('Generating cloud for', user_id)
try:
# if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):
# vk_group.messages.send(user_id=user_id,
# random_id=random.randint(0, 99999999),
# message='Чтобы составить облако тегов, '
# 'подпишись на меня https://vk.com/wwcloud 🙄')
# time.sleep(1)
# vk_group.messages.send(user_id=user_id,
# random_id=random.randint(0, 99999999),
# message='Когда будешь готов, снова отправь кодовое слово "облако" 😊')
# processing.remove(user_id)
# time.sleep(5)
# return
if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Похоже, у тебя недостаточно записей на стене '
'для составления облака тегов☹️')
processing.remove(user_id)
print('Removed (1) cloud from processing for', user_id)
time.sleep(5)
return
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')
user = vk.users.get(user_ids=user_id)[0]
user_id = user['id']
name = user['first_name'] + ' ' + user['last_name']
clouded = cloud(user_id)
if not clouded:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Похоже, у тебя недостаточно записей на стене '
'для составления облака тегов ☹️')
processing.remove(user_id)
print('Removed (2) cloud from processing for', user_id)
time.sleep(5)
return
clouded, wall, top_words = clouded
photo = vk_upload.photo(
clouded,
album_id=config.album_id,
group_id=config.group_id
)[0]
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999), message='А вот и твое облако тегов! 🌍',
attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999), message='Не забудь поделиться с друзьями 😉')
post_id = None
if len(top_words) > 100:
try:
post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,
message='Облако тегов для *id{}({})'.format(user_id, name),
attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']
except Exception as e:
processing.remove(user_id)
print(e)
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Похоже, я превысил лимит количества постов на сегодня 😭')
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Создай новое облако завтра, и я выложу его на стену группы 😎')
print('Removed (3) cloud from processing for', user_id)
if post_id:
# collection.insert({
# 'user_id': user_id,
# 'owner_id': photo['owner_id'],
# 'id': photo['id'],
# 'post': post_id,
# 'timestamp': time.time(),
# 'length': len(top_words)
# })
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
attachment='wall{}_{}'.format(photo['owner_id'], post_id))
# else:
# collection.insert({
# 'user_id': user_id,
# 'owner_id': photo['owner_id'],
# 'id': photo['id'],
# 'timestamp': time.time(),
# 'length': len(top_words)
# })
# if send:
# vk_group.messages.send(
# user_id=user_id,
# random_id=random.randint(0, 99999999),
# message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',
# attachment='wall-136503501_467'
# )
processing.remove(user_id)
print('Finished cloud for', user_id)
except Exception as e:
processing.remove(user_id)
print('Finished cloud for', user_id, 'with error')
raise e
def worker(q, old=False):
while True:
        # take the next task from the queue
item = q.get()
try:
item[0](*item[1], **item[2])
except Exception:
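            # swallow task errors so one failing job doesn't kill the worker thread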
pass
        # mark the task as done in the queue
q.task_done()
if __name__ == '__main__':
q = Queue()
for i in range(10):
t = Thread(target=worker, args=(q,))
        t.daemon = True
t.start()
print('Initializing longpoll connection...', end=' ')
longpoll = VkLongPoll(vk_group_session)
print('Done')
for event in longpoll.listen():
if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:
print(event.user_id, event.text)
q.put((send_cloud, (event.user_id, event.text), {}))
q.join()
|
normal
|
{
"blob_id": "03ce69924c885e59e40689dc63e50d54b89649f7",
"index": 2924,
"step-1": "<mask token>\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\n<mask token>\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\n<mask token>\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\n<mask token>\nprint('Done')\n<mask token>\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 
🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\nprocessing = []\ncurrent_year = datetime.now().year - 1 if datetime.now(\n ).month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно 
записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-4": "import _thread\nimport os\nfrom queue import Queue\nfrom threading import Thread\nimport random\nimport io\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom datetime import datetime, timedelta\nimport time\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport pymorphy2\nfrom pymongo import MongoClient\nimport config\nimport matplotlib\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\nprocessing = []\ncurrent_year = datetime.now().year - 1 if datetime.now(\n ).month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, 
random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-5": "import _thread\nimport os\nfrom queue import Queue\nfrom threading import Thread\nimport random\nimport io\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom datetime import datetime, timedelta\nimport time\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport pymorphy2\nfrom pymongo import MongoClient\nimport config\nimport matplotlib\n\nmatplotlib.use('Agg')\n\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\n\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\n\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\n\nprocessing = []\n\ncurrent_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))\n\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),\n filter(\n lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,\n tokenizer.tokenize(sentence.replace('\\xa0', ' '))\n )\n )\n\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))\n if not top_words:\n return\n\n # def color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n # return \"hsl(%d, 100%%, %d%%)\" % (random.randint(0, 360), random.randint(20, 50))\n\n def color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n return \"rgb(0, 0, 0)\"\n\n sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)\n wordcloud = WordCloud(\n max_words=50,\n max_font_size=500,\n background_color='white',\n margin=5,\n width=1000,\n height=1000,\n stopwords=sw,\n prefer_horizontal=0.7,\n font_path='font.ttf'\n ).generate(' '.join(top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Если ты хочешь получить свое облако тегов за {current_year} '\n 'год, отправь мне слово \"облако\" без кавычек 🙃')\n return\n\n processing.append(user_id)\n\n print('Generating cloud for', user_id)\n try:\n # if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):\n # vk_group.messages.send(user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Чтобы составить облако тегов, '\n # 'подпишись на 
меня https://vk.com/wwcloud 🙄')\n # time.sleep(1)\n # vk_group.messages.send(user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Когда будешь готов, снова отправь кодовое слово \"облако\" 😊')\n # processing.remove(user_id)\n # time.sleep(5)\n # return\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, у тебя недостаточно записей на стене '\n 'для составления облака тегов☹️')\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, у тебя недостаточно записей на стене '\n 'для составления облака тегов ☹️')\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(\n clouded,\n album_id=config.album_id,\n group_id=config.group_id\n )[0]\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999), message='А вот и твое облако тегов! 🌍',\n attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999), message='Не забудь поделиться с друзьями 😉')\n\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,\n message='Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, я превысил лимит количества постов на сегодня 😭')\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Создай новое облако завтра, и я выложу его на стену группы 😎')\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n # collection.insert({\n # 'user_id': user_id,\n # 'owner_id': photo['owner_id'],\n # 'id': photo['id'],\n # 'post': post_id,\n # 'timestamp': time.time(),\n # 'length': len(top_words)\n # })\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n attachment='wall{}_{}'.format(photo['owner_id'], post_id))\n # else:\n # collection.insert({\n # 'user_id': user_id,\n # 'owner_id': photo['owner_id'],\n # 'id': photo['id'],\n # 'timestamp': time.time(),\n # 'length': len(top_words)\n # })\n\n # if send:\n # vk_group.messages.send(\n # user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',\n # attachment='wall-136503501_467'\n # )\n\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n # Получаем задание из очереди\n item = q.get()\n try:\n item[0](*item[1], 
**item[2])\n except Exception:\n pass\n # Сообщаем о выполненном задании\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n\n for event in longpoll.listen():\n if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class test(wx.Frame):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class test(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class test(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = test(parent=None, id=-1)
frame.show()
app.mainloop()
<|reserved_special_token_1|>
import wx
class test(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = test(parent=None, id=-1)
frame.show()
app.mainloop()
<|reserved_special_token_1|>
#!/usr/bin/python
import wx
class test(wx.Frame):
def __init__(self,parent,id):
wx.Frame.__init__(self,parent,id,"TestFrame",size=(500,500))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = test(parent=None,id=-1,)
frame.show()
app.mainloop()
|
flexible
|
{
"blob_id": "e204cbbf36ac180eba0e95916345088c77bca7c0",
"index": 5001,
"step-1": "<mask token>\n\n\nclass test(wx.Frame):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None, id=-1)\n frame.show()\n app.mainloop()\n",
"step-4": "import wx\n\n\nclass test(wx.Frame):\n\n def __init__(self, parent, id):\n wx.Frame.__init__(self, parent, id, 'TestFrame', size=(500, 500))\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None, id=-1)\n frame.show()\n app.mainloop()\n",
"step-5": "#!/usr/bin/python\nimport wx\n\nclass test(wx.Frame):\n def __init__(self,parent,id):\n wx.Frame.__init__(self,parent,id,\"TestFrame\",size=(500,500))\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n frame = test(parent=None,id=-1,)\n frame.show()\n app.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class StockPicking(orm.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StockPicking(orm.Model):
<|reserved_special_token_0|>
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,
context=None):
if context is None:
context = {}
partner, currency_id, company_id, user_id = key
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
return {'origin': origin and origin.picking_id.name or origin,
'date_invoice': context.get('date_inv', False),
'address_shipping_id': partner.id, 'user_id': user_id,
'partner_id': partner.id, 'account_id': account_id,
'payment_term': payment_term, 'type': inv_type,
'fiscal_position': partner.property_account_position.id,
'company_id': company_id, 'currency_id': currency_id,
'journal_id': journal_id}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StockPicking(orm.Model):
_inherit = 'stock.picking'
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,
context=None):
if context is None:
context = {}
partner, currency_id, company_id, user_id = key
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
return {'origin': origin and origin.picking_id.name or origin,
'date_invoice': context.get('date_inv', False),
'address_shipping_id': partner.id, 'user_id': user_id,
'partner_id': partner.id, 'account_id': account_id,
'payment_term': payment_term, 'type': inv_type,
'fiscal_position': partner.property_account_position.id,
'company_id': company_id, 'currency_id': currency_id,
'journal_id': journal_id}
<|reserved_special_token_1|>
from openerp.osv import orm
class StockPicking(orm.Model):
_inherit = 'stock.picking'
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,
context=None):
if context is None:
context = {}
partner, currency_id, company_id, user_id = key
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
return {'origin': origin and origin.picking_id.name or origin,
'date_invoice': context.get('date_inv', False),
'address_shipping_id': partner.id, 'user_id': user_id,
'partner_id': partner.id, 'account_id': account_id,
'payment_term': payment_term, 'type': inv_type,
'fiscal_position': partner.property_account_position.id,
'company_id': company_id, 'currency_id': currency_id,
'journal_id': journal_id}
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Nicola Malcontenti <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class StockPicking(orm.Model):
_inherit = "stock.picking"
#def _get_invoice_vals(self, cr, uid, key, inv_type,
# journal_id, origin, context=None):
# invoice_vals = super(StockPicking, self)._get_invoice_vals(
# cr, uid, key, inv_type, journal_id, origin, context=context)
# if context.get('active_id'):
# picking_id = int(context['active_id'])
# partner_id = self.browse(cr, uid, picking_id, context=context).partner_id
# if partner_id:
# invoice_vals['address_shipping_id'] = partner_id.id
# return invoice_vals
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):
if context is None:
context = {}
partner, currency_id, company_id, user_id = key
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
return {
'origin': origin and origin.picking_id.name or origin,
'date_invoice': context.get('date_inv', False),
'address_shipping_id': partner.id,
'user_id': user_id,
'partner_id': partner.id,
'account_id': account_id,
'payment_term': payment_term,
'type': inv_type,
'fiscal_position': partner.property_account_position.id,
'company_id': company_id,
'currency_id': currency_id,
'journal_id': journal_id,
}
|
flexible
|
{
"blob_id": "b111d799b9e71cf36253c37f83dc0cdc8887a32e",
"index": 7404,
"step-1": "<mask token>\n\n\nclass StockPicking(orm.Model):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StockPicking(orm.Model):\n <mask token>\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,\n context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id, 'user_id': user_id,\n 'partner_id': partner.id, 'account_id': account_id,\n 'payment_term': payment_term, 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id, 'currency_id': currency_id,\n 'journal_id': journal_id}\n",
"step-3": "<mask token>\n\n\nclass StockPicking(orm.Model):\n _inherit = 'stock.picking'\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,\n context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id, 'user_id': user_id,\n 'partner_id': partner.id, 'account_id': account_id,\n 'payment_term': payment_term, 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id, 'currency_id': currency_id,\n 'journal_id': journal_id}\n",
"step-4": "from openerp.osv import orm\n\n\nclass StockPicking(orm.Model):\n _inherit = 'stock.picking'\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin,\n context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id, 'user_id': user_id,\n 'partner_id': partner.id, 'account_id': account_id,\n 'payment_term': payment_term, 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id, 'currency_id': currency_id,\n 'journal_id': journal_id}\n",
"step-5": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2014 Agile Business Group sagl (<http://www.agilebg.com>)\n# Author: Nicola Malcontenti <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm\n\n\nclass StockPicking(orm.Model):\n _inherit = \"stock.picking\"\n\n #def _get_invoice_vals(self, cr, uid, key, inv_type,\n # journal_id, origin, context=None):\n # invoice_vals = super(StockPicking, self)._get_invoice_vals(\n # cr, uid, key, inv_type, journal_id, origin, context=context)\n # if context.get('active_id'):\n # picking_id = int(context['active_id'])\n # partner_id = self.browse(cr, uid, picking_id, context=context).partner_id\n # if partner_id:\n # invoice_vals['address_shipping_id'] = partner_id.id\n # return invoice_vals\n\n def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):\n if context is None:\n context = {}\n partner, currency_id, company_id, user_id = key\n if inv_type in ('out_invoice', 'out_refund'):\n account_id = partner.property_account_receivable.id\n payment_term = partner.property_payment_term.id or False\n else:\n account_id = partner.property_account_payable.id\n payment_term = partner.property_supplier_payment_term.id or False\n return {\n 'origin': origin and origin.picking_id.name or origin,\n 'date_invoice': context.get('date_inv', False),\n 'address_shipping_id': partner.id,\n 'user_id': user_id,\n 'partner_id': partner.id,\n 'account_id': account_id,\n 'payment_term': payment_term,\n 'type': inv_type,\n 'fiscal_position': partner.property_account_position.id,\n 'company_id': company_id,\n 'currency_id': currency_id,\n 'journal_id': journal_id,\n }\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from core import Postgresdb
db = Postgresdb()
print(db)
|
normal
|
{
"blob_id": "962a9781e4f2ad787dd695896b6455c9b336603a",
"index": 7178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(db)\n",
"step-3": "<mask token>\ndb = Postgresdb()\nprint(db)\n",
"step-4": "from core import Postgresdb\ndb = Postgresdb()\nprint(db)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def add_nodes(g):
nodes = ['a', 'b', 'c', 'd']
for n in nodes:
g.add_node(n)
def add_desc(g):
desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for d in desc:
g.add_desc(d)
def add_edges(g):
edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for e in edges:
g.add_edge(e)
<|reserved_special_token_0|>
def is_blocked(path, obs_dict, g):
prev_edge = []
for cur_edge in path:
if prev_edge:
prev_node, prev_dir = prev_edge
cur_node, cur_dir = cur_edge
if prev_dir == 1 and cur_dir == 0:
blocking_v = True
for n in g.nodes[prev_node].desc:
if obs_dict[n]:
blocking_v = False
if blocking_v:
return True
elif obs_dict[prev_node]:
return True
prev_edge = cur_edge
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_nodes(g):
nodes = ['a', 'b', 'c', 'd']
for n in nodes:
g.add_node(n)
def add_desc(g):
desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for d in desc:
g.add_desc(d)
def add_edges(g):
edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for e in edges:
g.add_edge(e)
def read_all_paths(n):
all_paths = {}
with open(n + '.txt', 'r') as infile:
for line in infile:
path = ast.literal_eval(line)
if path:
dest = path[-1][0]
if dest in all_paths:
all_paths[dest].append(path)
else:
all_paths[dest] = [path]
return all_paths
def is_blocked(path, obs_dict, g):
prev_edge = []
for cur_edge in path:
if prev_edge:
prev_node, prev_dir = prev_edge
cur_node, cur_dir = cur_edge
if prev_dir == 1 and cur_dir == 0:
blocking_v = True
for n in g.nodes[prev_node].desc:
if obs_dict[n]:
blocking_v = False
if blocking_v:
return True
elif obs_dict[prev_node]:
return True
prev_edge = cur_edge
return False
def is_indep(obs_dict, all_paths, g):
for path in all_paths:
block = is_blocked(path, obs_dict, g)
if block:
continue
else:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_nodes(g):
nodes = ['a', 'b', 'c', 'd']
for n in nodes:
g.add_node(n)
def add_desc(g):
desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for d in desc:
g.add_desc(d)
def add_edges(g):
edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for e in edges:
g.add_edge(e)
def read_all_paths(n):
all_paths = {}
with open(n + '.txt', 'r') as infile:
for line in infile:
path = ast.literal_eval(line)
if path:
dest = path[-1][0]
if dest in all_paths:
all_paths[dest].append(path)
else:
all_paths[dest] = [path]
return all_paths
def is_blocked(path, obs_dict, g):
prev_edge = []
for cur_edge in path:
if prev_edge:
prev_node, prev_dir = prev_edge
cur_node, cur_dir = cur_edge
if prev_dir == 1 and cur_dir == 0:
blocking_v = True
for n in g.nodes[prev_node].desc:
if obs_dict[n]:
blocking_v = False
if blocking_v:
return True
elif obs_dict[prev_node]:
return True
prev_edge = cur_edge
return False
def is_indep(obs_dict, all_paths, g):
for path in all_paths:
block = is_blocked(path, obs_dict, g)
if block:
continue
else:
return False
return True
if __name__ == '__main__':
g = Graph()
add_nodes(g)
add_edges(g)
add_desc(g)
g.print_all_edges()
g.print_all_descs()
for n in g.nodes.keys():
g.get_all_paths(n, n)
all_nodes = list(g.nodes.keys())
all_paths = {}
for n in all_nodes:
all_paths[n] = read_all_paths(n)
s = len(all_nodes)
obs_dict = {}
combs = list(itertools.product([0, 1], repeat=s))
for c in combs:
for n, val in zip(all_nodes, c):
obs_dict[n] = val
for i, j in itertools.combinations(all_nodes, 2):
indep = is_indep(obs_dict, all_paths[i][j], g)
if indep:
observed = [all_nodes[idx] for idx, val in enumerate(c) if val]
if not i in observed and not j in observed:
print(i, j, str(observed))
g.reset_files()
<|reserved_special_token_1|>
from graph import Graph
import ast
import itertools
def add_nodes(g):
nodes = ['a', 'b', 'c', 'd']
for n in nodes:
g.add_node(n)
def add_desc(g):
desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for d in desc:
g.add_desc(d)
def add_edges(g):
edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for e in edges:
g.add_edge(e)
def read_all_paths(n):
all_paths = {}
with open(n + '.txt', 'r') as infile:
for line in infile:
path = ast.literal_eval(line)
if path:
dest = path[-1][0]
if dest in all_paths:
all_paths[dest].append(path)
else:
all_paths[dest] = [path]
return all_paths
def is_blocked(path, obs_dict, g):
prev_edge = []
for cur_edge in path:
if prev_edge:
prev_node, prev_dir = prev_edge
cur_node, cur_dir = cur_edge
if prev_dir == 1 and cur_dir == 0:
blocking_v = True
for n in g.nodes[prev_node].desc:
if obs_dict[n]:
blocking_v = False
if blocking_v:
return True
elif obs_dict[prev_node]:
return True
prev_edge = cur_edge
return False
def is_indep(obs_dict, all_paths, g):
for path in all_paths:
block = is_blocked(path, obs_dict, g)
if block:
continue
else:
return False
return True
if __name__ == '__main__':
g = Graph()
add_nodes(g)
add_edges(g)
add_desc(g)
g.print_all_edges()
g.print_all_descs()
for n in g.nodes.keys():
g.get_all_paths(n, n)
all_nodes = list(g.nodes.keys())
all_paths = {}
for n in all_nodes:
all_paths[n] = read_all_paths(n)
s = len(all_nodes)
obs_dict = {}
combs = list(itertools.product([0, 1], repeat=s))
for c in combs:
for n, val in zip(all_nodes, c):
obs_dict[n] = val
for i, j in itertools.combinations(all_nodes, 2):
indep = is_indep(obs_dict, all_paths[i][j], g)
if indep:
observed = [all_nodes[idx] for idx, val in enumerate(c) if val]
if not i in observed and not j in observed:
print(i, j, str(observed))
g.reset_files()
<|reserved_special_token_1|>
from graph import Graph
import ast
import itertools
def add_nodes(g):
nodes = ['a', 'b', 'c', 'd']
for n in nodes:
g.add_node(n)
def add_desc(g):
desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for d in desc:
g.add_desc(d)
def add_edges(g):
edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for e in edges:
g.add_edge(e)
def read_all_paths(n):
all_paths = {}
with open(n+'.txt', 'r') as infile:
for line in infile:
path = ast.literal_eval(line)
if path:
dest = path[-1][0]
if dest in all_paths:
all_paths[dest].append(path)
else:
all_paths[dest] = [path]
return all_paths
def is_blocked(path, obs_dict, g):
prev_edge = []
for cur_edge in path:
# try to find blocking transitions - either non-observed v-structures, or observed regulars
if prev_edge:
prev_node, prev_dir = prev_edge
cur_node, cur_dir = cur_edge
if prev_dir == 1 and cur_dir == 0:
# V-structure
blocking_v = True
for n in g.nodes[prev_node].desc:
if obs_dict[n]:
blocking_v = False
if blocking_v:
return True
else:
# not V-structure
if obs_dict[prev_node]:
return True
prev_edge = cur_edge
return False
def is_indep(obs_dict, all_paths, g):
for path in all_paths:
block = is_blocked(path, obs_dict, g)
if block:
continue
else:
# we have found a non-blocked path, so indep does not hold
return False
return True
if __name__=='__main__':
g = Graph()
add_nodes(g)
add_edges(g)
add_desc(g)
g.print_all_edges()
g.print_all_descs()
for n in g.nodes.keys():
g.get_all_paths(n, n)
all_nodes = list(g.nodes.keys())
all_paths = {}
for n in all_nodes:
all_paths[n] = read_all_paths(n)
s = len(all_nodes)
obs_dict = {}
combs = list(itertools.product([0,1], repeat = s))
for c in combs:
for n, val in zip(all_nodes, c):
obs_dict[n] = val
for i, j in itertools.combinations(all_nodes, 2):
indep = is_indep(obs_dict, all_paths[i][j], g)
if indep:
observed = [all_nodes[idx] for idx, val in enumerate(c) if val]
if (not (i in observed)) and (not (j in observed)):
print(i, j, str(observed))
# print(i, j, str([all_nodes[idx] for idx, val in enumerate(c) if val]))
g.reset_files()
|
flexible
|
{
"blob_id": "8efee4ad16e938e85a500e5aebf5154b5708b277",
"index": 9287,
"step-1": "<mask token>\n\n\ndef add_nodes(g):\n nodes = ['a', 'b', 'c', 'd']\n for n in nodes:\n g.add_node(n)\n\n\ndef add_desc(g):\n desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for d in desc:\n g.add_desc(d)\n\n\ndef add_edges(g):\n edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for e in edges:\n g.add_edge(e)\n\n\n<mask token>\n\n\ndef is_blocked(path, obs_dict, g):\n prev_edge = []\n for cur_edge in path:\n if prev_edge:\n prev_node, prev_dir = prev_edge\n cur_node, cur_dir = cur_edge\n if prev_dir == 1 and cur_dir == 0:\n blocking_v = True\n for n in g.nodes[prev_node].desc:\n if obs_dict[n]:\n blocking_v = False\n if blocking_v:\n return True\n elif obs_dict[prev_node]:\n return True\n prev_edge = cur_edge\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_nodes(g):\n nodes = ['a', 'b', 'c', 'd']\n for n in nodes:\n g.add_node(n)\n\n\ndef add_desc(g):\n desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for d in desc:\n g.add_desc(d)\n\n\ndef add_edges(g):\n edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for e in edges:\n g.add_edge(e)\n\n\ndef read_all_paths(n):\n all_paths = {}\n with open(n + '.txt', 'r') as infile:\n for line in infile:\n path = ast.literal_eval(line)\n if path:\n dest = path[-1][0]\n if dest in all_paths:\n all_paths[dest].append(path)\n else:\n all_paths[dest] = [path]\n return all_paths\n\n\ndef is_blocked(path, obs_dict, g):\n prev_edge = []\n for cur_edge in path:\n if prev_edge:\n prev_node, prev_dir = prev_edge\n cur_node, cur_dir = cur_edge\n if prev_dir == 1 and cur_dir == 0:\n blocking_v = True\n for n in g.nodes[prev_node].desc:\n if obs_dict[n]:\n blocking_v = False\n if blocking_v:\n return True\n elif obs_dict[prev_node]:\n return True\n prev_edge = cur_edge\n return False\n\n\ndef is_indep(obs_dict, all_paths, g):\n for path in all_paths:\n block = is_blocked(path, obs_dict, g)\n if block:\n continue\n else:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_nodes(g):\n nodes = ['a', 'b', 'c', 'd']\n for n in nodes:\n g.add_node(n)\n\n\ndef add_desc(g):\n desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for d in desc:\n g.add_desc(d)\n\n\ndef add_edges(g):\n edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for e in edges:\n g.add_edge(e)\n\n\ndef read_all_paths(n):\n all_paths = {}\n with open(n + '.txt', 'r') as infile:\n for line in infile:\n path = ast.literal_eval(line)\n if path:\n dest = path[-1][0]\n if dest in all_paths:\n all_paths[dest].append(path)\n else:\n all_paths[dest] = [path]\n return all_paths\n\n\ndef is_blocked(path, obs_dict, g):\n prev_edge = []\n for cur_edge in path:\n if prev_edge:\n prev_node, prev_dir = prev_edge\n cur_node, cur_dir = cur_edge\n if prev_dir == 1 and cur_dir == 0:\n blocking_v = True\n for n in g.nodes[prev_node].desc:\n if obs_dict[n]:\n blocking_v = False\n if blocking_v:\n return True\n elif obs_dict[prev_node]:\n return True\n prev_edge = cur_edge\n return False\n\n\ndef is_indep(obs_dict, all_paths, g):\n for path in all_paths:\n block = is_blocked(path, obs_dict, g)\n if block:\n continue\n else:\n return False\n return True\n\n\nif __name__ == '__main__':\n g = Graph()\n add_nodes(g)\n add_edges(g)\n add_desc(g)\n g.print_all_edges()\n g.print_all_descs()\n for n in g.nodes.keys():\n g.get_all_paths(n, n)\n all_nodes = list(g.nodes.keys())\n all_paths = {}\n for n in all_nodes:\n all_paths[n] = read_all_paths(n)\n s = len(all_nodes)\n obs_dict = {}\n combs = list(itertools.product([0, 1], repeat=s))\n for c in combs:\n for n, val in zip(all_nodes, c):\n obs_dict[n] = val\n for i, j in itertools.combinations(all_nodes, 2):\n indep = is_indep(obs_dict, all_paths[i][j], g)\n if indep:\n observed = [all_nodes[idx] for idx, val in enumerate(c) if val]\n if not i in observed and not j in observed:\n print(i, j, str(observed))\n g.reset_files()\n",
"step-4": "from graph import Graph\nimport ast\nimport itertools\n\n\ndef add_nodes(g):\n nodes = ['a', 'b', 'c', 'd']\n for n in nodes:\n g.add_node(n)\n\n\ndef add_desc(g):\n desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for d in desc:\n g.add_desc(d)\n\n\ndef add_edges(g):\n edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n for e in edges:\n g.add_edge(e)\n\n\ndef read_all_paths(n):\n all_paths = {}\n with open(n + '.txt', 'r') as infile:\n for line in infile:\n path = ast.literal_eval(line)\n if path:\n dest = path[-1][0]\n if dest in all_paths:\n all_paths[dest].append(path)\n else:\n all_paths[dest] = [path]\n return all_paths\n\n\ndef is_blocked(path, obs_dict, g):\n prev_edge = []\n for cur_edge in path:\n if prev_edge:\n prev_node, prev_dir = prev_edge\n cur_node, cur_dir = cur_edge\n if prev_dir == 1 and cur_dir == 0:\n blocking_v = True\n for n in g.nodes[prev_node].desc:\n if obs_dict[n]:\n blocking_v = False\n if blocking_v:\n return True\n elif obs_dict[prev_node]:\n return True\n prev_edge = cur_edge\n return False\n\n\ndef is_indep(obs_dict, all_paths, g):\n for path in all_paths:\n block = is_blocked(path, obs_dict, g)\n if block:\n continue\n else:\n return False\n return True\n\n\nif __name__ == '__main__':\n g = Graph()\n add_nodes(g)\n add_edges(g)\n add_desc(g)\n g.print_all_edges()\n g.print_all_descs()\n for n in g.nodes.keys():\n g.get_all_paths(n, n)\n all_nodes = list(g.nodes.keys())\n all_paths = {}\n for n in all_nodes:\n all_paths[n] = read_all_paths(n)\n s = len(all_nodes)\n obs_dict = {}\n combs = list(itertools.product([0, 1], repeat=s))\n for c in combs:\n for n, val in zip(all_nodes, c):\n obs_dict[n] = val\n for i, j in itertools.combinations(all_nodes, 2):\n indep = is_indep(obs_dict, all_paths[i][j], g)\n if indep:\n observed = [all_nodes[idx] for idx, val in enumerate(c) if val]\n if not i in observed and not j in observed:\n print(i, j, str(observed))\n g.reset_files()\n",
"step-5": "from graph import Graph\nimport ast\nimport itertools\n\n\ndef add_nodes(g):\n\n nodes = ['a', 'b', 'c', 'd']\n for n in nodes:\n g.add_node(n)\n\ndef add_desc(g):\n desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n\n for d in desc:\n g.add_desc(d)\n\ndef add_edges(g):\n\n edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]\n\n for e in edges:\n g.add_edge(e)\n\ndef read_all_paths(n):\n all_paths = {}\n with open(n+'.txt', 'r') as infile:\n for line in infile:\n path = ast.literal_eval(line)\n if path:\n dest = path[-1][0]\n if dest in all_paths:\n all_paths[dest].append(path)\n else:\n all_paths[dest] = [path]\n\n return all_paths\n\ndef is_blocked(path, obs_dict, g):\n\n prev_edge = []\n\n for cur_edge in path:\n # try to find blocking transitions - either non-observed v-structures, or observed regulars\n\n if prev_edge:\n prev_node, prev_dir = prev_edge\n cur_node, cur_dir = cur_edge\n\n if prev_dir == 1 and cur_dir == 0:\n # V-structure\n\n blocking_v = True\n for n in g.nodes[prev_node].desc:\n if obs_dict[n]:\n blocking_v = False\n\n if blocking_v:\n return True\n\n\n else:\n # not V-structure\n\n if obs_dict[prev_node]:\n return True\n\n prev_edge = cur_edge\n\n return False\n\n\n\ndef is_indep(obs_dict, all_paths, g):\n\n for path in all_paths:\n block = is_blocked(path, obs_dict, g)\n if block:\n continue\n else:\n # we have found a non-blocked path, so indep does not hold\n return False\n\n return True\n\nif __name__=='__main__':\n\n g = Graph()\n add_nodes(g)\n add_edges(g)\n add_desc(g)\n g.print_all_edges()\n g.print_all_descs()\n for n in g.nodes.keys():\n g.get_all_paths(n, n)\n\n all_nodes = list(g.nodes.keys())\n\n all_paths = {}\n for n in all_nodes:\n all_paths[n] = read_all_paths(n)\n\n s = len(all_nodes)\n obs_dict = {}\n\n combs = list(itertools.product([0,1], repeat = s))\n for c in combs:\n for n, val in zip(all_nodes, c):\n obs_dict[n] = val\n\n for i, j in itertools.combinations(all_nodes, 2):\n\n indep = is_indep(obs_dict, all_paths[i][j], g)\n if indep:\n observed = [all_nodes[idx] for idx, val in enumerate(c) if val]\n if (not (i in observed)) and (not (j in observed)):\n print(i, j, str(observed))\n # print(i, j, str([all_nodes[idx] for idx, val in enumerate(c) if val]))\n\n\n g.reset_files()\n\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conf = SparkConf().setAppName('same_host').setMaster('local')
sc = SparkContext(conf=conf)
julyFirstLogs = sc.textFile(
'/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')
augFirstLogs = sc.textFile(
'/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')
julyFirstLogs = julyFirstLogs.map(lambda line: line.split('\t')[0])
augFirstLogs = augFirstLogs.map(lambda line: line.split('\t')[0])
intersection = julyFirstLogs.intersection(augFirstLogs)
cleanedHostIntersection = intersection.filter(lambda host: host != 'host')
cleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')
<|reserved_special_token_1|>
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName('same_host').setMaster('local')
sc = SparkContext(conf=conf)
julyFirstLogs = sc.textFile(
'/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')
augFirstLogs = sc.textFile(
'/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')
julyFirstLogs = julyFirstLogs.map(lambda line: line.split('\t')[0])
augFirstLogs = augFirstLogs.map(lambda line: line.split('\t')[0])
intersection = julyFirstLogs.intersection(augFirstLogs)
cleanedHostIntersection = intersection.filter(lambda host: host != 'host')
cleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')
<|reserved_special_token_1|>
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("same_host").setMaster("local")
sc = SparkContext(conf=conf)
julyFirstLogs = sc.textFile("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv")
augFirstLogs = sc.textFile("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv")
julyFirstLogs = julyFirstLogs.map(lambda line: line.split("\t")[0])
augFirstLogs = augFirstLogs.map(lambda line: line.split("\t")[0])
intersection = julyFirstLogs.intersection(augFirstLogs)
cleanedHostIntersection = intersection.filter(lambda host: host != "host")
cleanedHostIntersection.saveAsTextFile("out/nasa_logs_same_hosts.csv")
|
flexible
|
{
"blob_id": "36fce3837e0341d94ff6099a06be8cf757a1cfa9",
"index": 3596,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-3": "<mask token>\nconf = SparkConf().setAppName('same_host').setMaster('local')\nsc = SparkContext(conf=conf)\njulyFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')\naugFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split('\\t')[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split('\\t')[0])\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != 'host')\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-4": "from pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName('same_host').setMaster('local')\nsc = SparkContext(conf=conf)\njulyFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')\naugFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split('\\t')[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split('\\t')[0])\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != 'host')\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-5": "from pyspark import SparkContext, SparkConf\n\nconf = SparkConf().setAppName(\"same_host\").setMaster(\"local\")\nsc = SparkContext(conf=conf)\n\njulyFirstLogs = sc.textFile(\"/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv\")\naugFirstLogs = sc.textFile(\"/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv\")\n\n\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split(\"\\t\")[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split(\"\\t\")[0])\n\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != \"host\")\ncleanedHostIntersection.saveAsTextFile(\"out/nasa_logs_same_hosts.csv\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class FreeReplier(RegularReplier):
<|reserved_special_token_0|>
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods['jset.query'] = self.jset.handle
self.methods['jsd.query'] = self.jsd.handle
self.methods['jsi.query'] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods['jset.query'] = self.jset.handle
self.methods['jsd.query'] = self.jsd.handle
self.methods['jsi.query'] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
<|reserved_special_token_0|>
def stop(self):
self._running = False
self.thread.join()
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods['jset.query'] = self.jset.handle
self.methods['jsd.query'] = self.jsd.handle
self.methods['jsi.query'] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
def start(self):
self._running = True
self.thread.start()
def stop(self):
self._running = False
self.thread.join()
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get('method', None) == '.sys.heartbeat':
return self.methods['.sys.heartbeat'](message)
else:
self.input.put([client, message])
logging.debug('queue size | %s', self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
<|reserved_special_token_1|>
from jaqsmds.server.repliers.basic import RegularReplier
from jaqsmds.server.repliers.handlers import JsetHandler, JsdHandler, JsiHandler
from queue import Queue, Empty
from threading import Thread
import logging
class FreeReplier(RegularReplier):
def __init__(self):
super(FreeReplier, self).__init__()
self.jset = JsetHandler()
self.jsd = JsdHandler()
self.jsi = JsiHandler()
self.methods["jset.query"] = self.jset.handle
self.methods["jsd.query"] = self.jsd.handle
self.methods["jsi.query"] = self.jsi.handle
self.input = Queue()
self.output = Queue()
self._running = False
self.thread = Thread(target=self.run)
def run(self):
while self._running or self.input.qsize():
try:
client, message = self.input.get(timeout=2)
except Empty:
continue
result = self.handle(message)
self.output.put([client, result])
def start(self):
self._running = True
self.thread.start()
def stop(self):
self._running = False
self.thread.join()
@property
def unfinished(self):
return self.input.qsize() + self.output.qsize()
def put(self, client, message):
if message.get("method", None) == ".sys.heartbeat":
return self.methods[".sys.heartbeat"](message)
else:
self.input.put([client, message])
logging.debug("queue size | %s", self.input.qsize())
def get_output(self, timeout=0.001):
return self.output.get(timeout=timeout)
|
flexible
|
{
"blob_id": "42ebd42801b7d1563c9f204f296afba5fa3c6d3c",
"index": 1592,
"step-1": "<mask token>\n\n\nclass FreeReplier(RegularReplier):\n <mask token>\n\n def run(self):\n while self._running or self.input.qsize():\n try:\n client, message = self.input.get(timeout=2)\n except Empty:\n continue\n result = self.handle(message)\n self.output.put([client, result])\n <mask token>\n <mask token>\n\n @property\n def unfinished(self):\n return self.input.qsize() + self.output.qsize()\n\n def put(self, client, message):\n if message.get('method', None) == '.sys.heartbeat':\n return self.methods['.sys.heartbeat'](message)\n else:\n self.input.put([client, message])\n logging.debug('queue size | %s', self.input.qsize())\n\n def get_output(self, timeout=0.001):\n return self.output.get(timeout=timeout)\n",
"step-2": "<mask token>\n\n\nclass FreeReplier(RegularReplier):\n\n def __init__(self):\n super(FreeReplier, self).__init__()\n self.jset = JsetHandler()\n self.jsd = JsdHandler()\n self.jsi = JsiHandler()\n self.methods['jset.query'] = self.jset.handle\n self.methods['jsd.query'] = self.jsd.handle\n self.methods['jsi.query'] = self.jsi.handle\n self.input = Queue()\n self.output = Queue()\n self._running = False\n self.thread = Thread(target=self.run)\n\n def run(self):\n while self._running or self.input.qsize():\n try:\n client, message = self.input.get(timeout=2)\n except Empty:\n continue\n result = self.handle(message)\n self.output.put([client, result])\n <mask token>\n <mask token>\n\n @property\n def unfinished(self):\n return self.input.qsize() + self.output.qsize()\n\n def put(self, client, message):\n if message.get('method', None) == '.sys.heartbeat':\n return self.methods['.sys.heartbeat'](message)\n else:\n self.input.put([client, message])\n logging.debug('queue size | %s', self.input.qsize())\n\n def get_output(self, timeout=0.001):\n return self.output.get(timeout=timeout)\n",
"step-3": "<mask token>\n\n\nclass FreeReplier(RegularReplier):\n\n def __init__(self):\n super(FreeReplier, self).__init__()\n self.jset = JsetHandler()\n self.jsd = JsdHandler()\n self.jsi = JsiHandler()\n self.methods['jset.query'] = self.jset.handle\n self.methods['jsd.query'] = self.jsd.handle\n self.methods['jsi.query'] = self.jsi.handle\n self.input = Queue()\n self.output = Queue()\n self._running = False\n self.thread = Thread(target=self.run)\n\n def run(self):\n while self._running or self.input.qsize():\n try:\n client, message = self.input.get(timeout=2)\n except Empty:\n continue\n result = self.handle(message)\n self.output.put([client, result])\n <mask token>\n\n def stop(self):\n self._running = False\n self.thread.join()\n\n @property\n def unfinished(self):\n return self.input.qsize() + self.output.qsize()\n\n def put(self, client, message):\n if message.get('method', None) == '.sys.heartbeat':\n return self.methods['.sys.heartbeat'](message)\n else:\n self.input.put([client, message])\n logging.debug('queue size | %s', self.input.qsize())\n\n def get_output(self, timeout=0.001):\n return self.output.get(timeout=timeout)\n",
"step-4": "<mask token>\n\n\nclass FreeReplier(RegularReplier):\n\n def __init__(self):\n super(FreeReplier, self).__init__()\n self.jset = JsetHandler()\n self.jsd = JsdHandler()\n self.jsi = JsiHandler()\n self.methods['jset.query'] = self.jset.handle\n self.methods['jsd.query'] = self.jsd.handle\n self.methods['jsi.query'] = self.jsi.handle\n self.input = Queue()\n self.output = Queue()\n self._running = False\n self.thread = Thread(target=self.run)\n\n def run(self):\n while self._running or self.input.qsize():\n try:\n client, message = self.input.get(timeout=2)\n except Empty:\n continue\n result = self.handle(message)\n self.output.put([client, result])\n\n def start(self):\n self._running = True\n self.thread.start()\n\n def stop(self):\n self._running = False\n self.thread.join()\n\n @property\n def unfinished(self):\n return self.input.qsize() + self.output.qsize()\n\n def put(self, client, message):\n if message.get('method', None) == '.sys.heartbeat':\n return self.methods['.sys.heartbeat'](message)\n else:\n self.input.put([client, message])\n logging.debug('queue size | %s', self.input.qsize())\n\n def get_output(self, timeout=0.001):\n return self.output.get(timeout=timeout)\n",
"step-5": "from jaqsmds.server.repliers.basic import RegularReplier\nfrom jaqsmds.server.repliers.handlers import JsetHandler, JsdHandler, JsiHandler\nfrom queue import Queue, Empty\nfrom threading import Thread\nimport logging\n\n\nclass FreeReplier(RegularReplier):\n\n def __init__(self):\n super(FreeReplier, self).__init__()\n self.jset = JsetHandler()\n self.jsd = JsdHandler()\n self.jsi = JsiHandler()\n self.methods[\"jset.query\"] = self.jset.handle\n self.methods[\"jsd.query\"] = self.jsd.handle\n self.methods[\"jsi.query\"] = self.jsi.handle\n self.input = Queue()\n self.output = Queue()\n self._running = False\n self.thread = Thread(target=self.run)\n\n def run(self):\n while self._running or self.input.qsize():\n try:\n client, message = self.input.get(timeout=2)\n except Empty:\n continue\n \n result = self.handle(message)\n self.output.put([client, result])\n\n def start(self):\n self._running = True\n self.thread.start()\n\n def stop(self):\n self._running = False\n self.thread.join() \n\n @property\n def unfinished(self):\n return self.input.qsize() + self.output.qsize()\n\n def put(self, client, message):\n if message.get(\"method\", None) == \".sys.heartbeat\":\n return self.methods[\".sys.heartbeat\"](message)\n else:\n self.input.put([client, message])\n logging.debug(\"queue size | %s\", self.input.qsize())\n \n def get_output(self, timeout=0.001):\n return self.output.get(timeout=timeout)",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
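The FreeReplier record above offloads request handling to a worker thread fed through an input/output queue pair. A minimal usage sketch, assuming the jaqsmds package and its handler classes are importable; the client id and message payload shown are illustrative only:

replier = FreeReplier()
replier.start()
replier.put('client-1', {'method': 'jset.query'})   # enqueued for the worker thread
print(replier.unfinished)                           # pending requests + results
client, result = replier.get_output(timeout=1.0)    # raises queue.Empty if not ready yet
replier.stop()                                      # lets the worker drain the input queue, then joins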
<|reserved_special_token_0|>
class completeness(cpnest.model.Model):
def __init__(self, catalog):
self.names = ['z', 'h', 'om', 'ol']
self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]
self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0,
0.0, 0.0)
self.catalog = catalog
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Schechter_unnormed(M, omega, alpha):
"""
    Unnormalized Schechter function
"""
Ms = Mstar(omega)
tmp = 10 ** (-0.4 * (M - Ms))
return tmp ** (alpha + 1.0) * np.exp(-tmp)
def normalise(omega, alpha, Mmin=-30, Mmax=-10):
"""
    Normalization of the Schechter function (todo: do it analytically)
"""
M = np.linspace(Mmin, Mmax, 100)
return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)
[0]) for Mi in M])
<|reserved_special_token_0|>
def Mthreshold(DL, mth=27.0):
"""
    Absolute threshold magnitude
"""
return mth - 5.0 * np.log10(100000.0 * DL)
def mabs(m, DL):
return m - 5.0 * np.log10(100000.0 * DL)
def HubbleLaw(D_L, omega):
return D_L * omega.h / 3000.0
<|reserved_special_token_0|>
class completeness(cpnest.model.Model):
def __init__(self, catalog):
self.names = ['z', 'h', 'om', 'ol']
self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]
self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0,
0.0, 0.0)
self.catalog = catalog
def log_prior(self, x):
if not np.isfinite(super(completeness, self).log_prior(x)):
return -np.inf
else:
self.omega.h = x['h']
self.omega.om = x['om']
self.omega.ol = x['ol']
zgw = x['z']
logP = 0.0
for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):
DL = lal.LuminosityDistance(self.omega, zi)
Mabsi = mabs(mi, DL)
if Mthreshold(DL) < Mabsi:
return -np.inf
else:
logP += np.log(Schechter(Mabsi, self.omega))
logP += np.log(lal.ComovingVolumeElement(zi, self.omega))
return logP
def log_likelihood(self, x):
logL = 0.0
zgw = x['z']
logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,
dDL))
logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.
catalog['z']])
return logL
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Mstar(omega):
"""
    Compute the cutoff magnitude of the Schechter function
"""
return -20.47 + 5.0 * np.log10(omega.h)
def Schechter_unnormed(M, omega, alpha):
"""
    Unnormalized Schechter function
"""
Ms = Mstar(omega)
tmp = 10 ** (-0.4 * (M - Ms))
return tmp ** (alpha + 1.0) * np.exp(-tmp)
def normalise(omega, alpha, Mmin=-30, Mmax=-10):
"""
    Normalization of the Schechter function (todo: do it analytically)
"""
M = np.linspace(Mmin, Mmax, 100)
return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)
[0]) for Mi in M])
def Schechter(M, omega, alpha=-1.07):
"""
    Normalized Schechter function
"""
return Schechter_unnormed(M, omega, alpha=alpha) / normalise(omega,
alpha=alpha)
def Mthreshold(DL, mth=27.0):
"""
    Absolute threshold magnitude
"""
return mth - 5.0 * np.log10(100000.0 * DL)
def mabs(m, DL):
return m - 5.0 * np.log10(100000.0 * DL)
def HubbleLaw(D_L, omega):
return D_L * omega.h / 3000.0
def gaussian(x, x0, sigma):
return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 *
np.pi))
class completeness(cpnest.model.Model):
def __init__(self, catalog):
self.names = ['z', 'h', 'om', 'ol']
self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]
self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0,
0.0, 0.0)
self.catalog = catalog
def log_prior(self, x):
if not np.isfinite(super(completeness, self).log_prior(x)):
return -np.inf
else:
self.omega.h = x['h']
self.omega.om = x['om']
self.omega.ol = x['ol']
zgw = x['z']
logP = 0.0
for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):
DL = lal.LuminosityDistance(self.omega, zi)
Mabsi = mabs(mi, DL)
if Mthreshold(DL) < Mabsi:
return -np.inf
else:
logP += np.log(Schechter(Mabsi, self.omega))
logP += np.log(lal.ComovingVolumeElement(zi, self.omega))
return logP
def log_likelihood(self, x):
logL = 0.0
zgw = x['z']
logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,
dDL))
logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.
catalog['z']])
return logL
if __name__ == '__main__':
Gal_cat = GalInABox([190, 200], [-25, -15], u.deg, u.deg, catalog='GLADE')[
::100]
M = completeness(Gal_cat)
job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)
job.run()
<|reserved_special_token_1|>
from astropy.coordinates import SkyCoord
import astropy.units as u
from mmlibrary import *
import numpy as np
import lal
from scipy.special import logsumexp
import cpnest, cpnest.model
DL = 33.4
dDL = 3.34
GW = SkyCoord(ra='13h07m05.49s', dec='23d23m02.0s', unit=('hourangle', 'deg'))
def Mstar(omega):
"""
    Compute the cutoff magnitude of the Schechter function
"""
return -20.47 + 5.0 * np.log10(omega.h)
def Schechter_unnormed(M, omega, alpha):
"""
    Unnormalized Schechter function
"""
Ms = Mstar(omega)
tmp = 10 ** (-0.4 * (M - Ms))
return tmp ** (alpha + 1.0) * np.exp(-tmp)
def normalise(omega, alpha, Mmin=-30, Mmax=-10):
"""
    Normalization of the Schechter function (todo: do it analytically)
"""
M = np.linspace(Mmin, Mmax, 100)
return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)
[0]) for Mi in M])
def Schechter(M, omega, alpha=-1.07):
"""
    Normalized Schechter function
"""
return Schechter_unnormed(M, omega, alpha=alpha) / normalise(omega,
alpha=alpha)
def Mthreshold(DL, mth=27.0):
"""
    Absolute threshold magnitude
"""
return mth - 5.0 * np.log10(100000.0 * DL)
def mabs(m, DL):
return m - 5.0 * np.log10(100000.0 * DL)
def HubbleLaw(D_L, omega):
return D_L * omega.h / 3000.0
def gaussian(x, x0, sigma):
return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 *
np.pi))
class completeness(cpnest.model.Model):
def __init__(self, catalog):
self.names = ['z', 'h', 'om', 'ol']
self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]
self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0,
0.0, 0.0)
self.catalog = catalog
def log_prior(self, x):
if not np.isfinite(super(completeness, self).log_prior(x)):
return -np.inf
else:
self.omega.h = x['h']
self.omega.om = x['om']
self.omega.ol = x['ol']
zgw = x['z']
logP = 0.0
for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):
DL = lal.LuminosityDistance(self.omega, zi)
Mabsi = mabs(mi, DL)
if Mthreshold(DL) < Mabsi:
return -np.inf
else:
logP += np.log(Schechter(Mabsi, self.omega))
logP += np.log(lal.ComovingVolumeElement(zi, self.omega))
return logP
def log_likelihood(self, x):
logL = 0.0
zgw = x['z']
logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,
dDL))
logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.
catalog['z']])
return logL
if __name__ == '__main__':
Gal_cat = GalInABox([190, 200], [-25, -15], u.deg, u.deg, catalog='GLADE')[
::100]
M = completeness(Gal_cat)
job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)
job.run()
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from setup_env import *
#from mmlibrary import *
from astropy.coordinates import SkyCoord
import astropy.units as u
from mmlibrary import *
import numpy as np
import lal
from scipy.special import logsumexp
import cpnest, cpnest.model
# Test object: GW170817
#GW = SkyCoord('13h07m05.49s', '23d23m02.0s', unit=(u.hourangle, u.deg))
DL=33.4
dDL=3.34
GW = SkyCoord(ra = '13h07m05.49s', dec = '23d23m02.0s',
unit=('hourangle','deg'))
def Mstar(omega):
'''
    Compute the cutoff magnitude of the Schechter function
'''
return -20.47 + 5.0*np.log10(omega.h)
def Schechter_unnormed(M, omega, alpha):
'''
    Unnormalized Schechter function
'''
Ms = Mstar(omega)
tmp = 10**(-0.4*(M-Ms))
return tmp**(alpha+1.0)*np.exp(-tmp)
def normalise(omega, alpha, Mmin = -30,Mmax = -10):
'''
    Normalization of the Schechter function (todo: do it analytically)
'''
M = np.linspace(Mmin, Mmax, 100)
return np.sum([Schechter_unnormed(Mi, omega, alpha = alpha)*np.diff(M)[0] for Mi in M])
def Schechter(M, omega, alpha = -1.07):
'''
    Normalized Schechter function
'''
return Schechter_unnormed(M, omega, alpha = alpha)/normalise(omega, alpha = alpha)
def Mthreshold(DL, mth = 27.0):
'''
    Absolute threshold magnitude
'''
return mth - 5.0*np.log10(1e5*DL)
def mabs(m, DL):
return m - 5.0*np.log10(1e5*DL)
def HubbleLaw(D_L, omega): # To review: first-order test only
    return D_L*omega.h/(3e3) # Sure about this number?
def gaussian(x,x0,sigma):
return np.exp(-(x-x0)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))
class completeness(cpnest.model.Model):
def __init__(self, catalog):
self.names=['z', 'h', 'om', 'ol']
self.bounds=[[0.001,0.012],
[0.5,1.],
[0.04,1.],
[0.,1.]]
self.omega = lal.CreateCosmologicalParameters(0.7,0.5,0.5,-1.,0.,0.)
self.catalog = catalog
def log_prior(self, x):
        # check finiteness and theta(M - Mth)
if not(np.isfinite(super(completeness, self).log_prior(x))):
return -np.inf
else:
self.omega.h = x['h']
self.omega.om = x['om']
self.omega.ol = x['ol']
zgw = x['z']
logP = 0.0
for zi,mi in zip(self.catalog['z'],self.catalog['Bmag']):
DL = lal.LuminosityDistance(self.omega, zi)
Mabsi = mabs(mi,DL)
if Mthreshold(DL) < Mabsi:
return -np.inf
else:
                    # Update the cosmological parameters with the simulation

                    # Compute the prior. Each coordinate is weighted with the
                    # probabilities of the GW ('banana') coordinates, as is z.
                    # Temporarily, it is assumed Gaussian around an event.
logP += np.log(Schechter(Mabsi, self.omega))
#log_P_RA = np.log(gaussian(x['ra'],Gal.ra.rad,Gal.ra.rad/100.))
#log_P_DEC = np.log(gaussian(x['dec'],Gal.dec.rad,Gal.dec.rad/100.))
logP += np.log(lal.ComovingVolumeElement(zi, self.omega))
return logP
    # PROBLEM! How do I introduce the delta(ra,dec)?
def log_likelihood(self, x):
logL = 0.0
zgw = x['z']
logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,dDL))
logL += logsumexp([gaussian(zgw, zgi, zgi/10.0) for zgi in self.catalog['z']])
#logL += np.log(gaussian(x['ra'],GW.ra.rad,GW.ra.rad/10.))
#logL += np.log(gaussian(x['dec'],GW.dec.rad,GW.dec.rad/10.))
return logL
if __name__ == '__main__':
Gal_cat = GalInABox([190,200],[-25,-15], u.deg, u.deg, catalog='GLADE')[::100]
M = completeness(Gal_cat)
job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)
job.run()
# GLADE galaxy catalog
|
flexible
|
{
"blob_id": "fa5468741e9884f6c8bcacaf9d560b5c93ee781a",
"index": 8906,
"step-1": "<mask token>\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Schechter_unnormed(M, omega, alpha):\n \"\"\"\n Funzione di Schechter non normalizzata\n \"\"\"\n Ms = Mstar(omega)\n tmp = 10 ** (-0.4 * (M - Ms))\n return tmp ** (alpha + 1.0) * np.exp(-tmp)\n\n\ndef normalise(omega, alpha, Mmin=-30, Mmax=-10):\n \"\"\"\n Normalizzazione funzione di Schechter (todo: fare analitica)\n \"\"\"\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)\n [0]) for Mi in M])\n\n\n<mask token>\n\n\ndef Mthreshold(DL, mth=27.0):\n \"\"\"\n Magnitudine assoluta di soglia\n \"\"\"\n return mth - 5.0 * np.log10(100000.0 * DL)\n\n\ndef mabs(m, DL):\n return m - 5.0 * np.log10(100000.0 * DL)\n\n\ndef HubbleLaw(D_L, omega):\n return D_L * omega.h / 3000.0\n\n\n<mask token>\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n\n def log_prior(self, x):\n if not np.isfinite(super(completeness, self).log_prior(x)):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi, DL)\n if Mthreshold(DL) < Mabsi:\n return -np.inf\n else:\n logP += np.log(Schechter(Mabsi, self.omega))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n return logP\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,\n dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.\n catalog['z']])\n return logL\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Mstar(omega):\n \"\"\"\n Calcolo magnitudine di taglio Schechter function\n \"\"\"\n return -20.47 + 5.0 * np.log10(omega.h)\n\n\ndef Schechter_unnormed(M, omega, alpha):\n \"\"\"\n Funzione di Schechter non normalizzata\n \"\"\"\n Ms = Mstar(omega)\n tmp = 10 ** (-0.4 * (M - Ms))\n return tmp ** (alpha + 1.0) * np.exp(-tmp)\n\n\ndef normalise(omega, alpha, Mmin=-30, Mmax=-10):\n \"\"\"\n Normalizzazione funzione di Schechter (todo: fare analitica)\n \"\"\"\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)\n [0]) for Mi in M])\n\n\ndef Schechter(M, omega, alpha=-1.07):\n \"\"\"\n Funzione di Schechter normalizzata\n \"\"\"\n return Schechter_unnormed(M, omega, alpha=alpha) / normalise(omega,\n alpha=alpha)\n\n\ndef Mthreshold(DL, mth=27.0):\n \"\"\"\n Magnitudine assoluta di soglia\n \"\"\"\n return mth - 5.0 * np.log10(100000.0 * DL)\n\n\ndef mabs(m, DL):\n return m - 5.0 * np.log10(100000.0 * DL)\n\n\ndef HubbleLaw(D_L, omega):\n return D_L * omega.h / 3000.0\n\n\ndef gaussian(x, x0, sigma):\n return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 *\n np.pi))\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n\n def log_prior(self, x):\n if not np.isfinite(super(completeness, self).log_prior(x)):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi, DL)\n if Mthreshold(DL) < Mabsi:\n return -np.inf\n else:\n logP += np.log(Schechter(Mabsi, self.omega))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n return logP\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,\n dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.\n catalog['z']])\n return logL\n\n\nif __name__ == '__main__':\n Gal_cat = GalInABox([190, 200], [-25, -15], u.deg, u.deg, catalog='GLADE')[\n ::100]\n M = completeness(Gal_cat)\n job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)\n job.run()\n",
"step-4": "from astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom mmlibrary import *\nimport numpy as np\nimport lal\nfrom scipy.special import logsumexp\nimport cpnest, cpnest.model\nDL = 33.4\ndDL = 3.34\nGW = SkyCoord(ra='13h07m05.49s', dec='23d23m02.0s', unit=('hourangle', 'deg'))\n\n\ndef Mstar(omega):\n \"\"\"\n Calcolo magnitudine di taglio Schechter function\n \"\"\"\n return -20.47 + 5.0 * np.log10(omega.h)\n\n\ndef Schechter_unnormed(M, omega, alpha):\n \"\"\"\n Funzione di Schechter non normalizzata\n \"\"\"\n Ms = Mstar(omega)\n tmp = 10 ** (-0.4 * (M - Ms))\n return tmp ** (alpha + 1.0) * np.exp(-tmp)\n\n\ndef normalise(omega, alpha, Mmin=-30, Mmax=-10):\n \"\"\"\n Normalizzazione funzione di Schechter (todo: fare analitica)\n \"\"\"\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)\n [0]) for Mi in M])\n\n\ndef Schechter(M, omega, alpha=-1.07):\n \"\"\"\n Funzione di Schechter normalizzata\n \"\"\"\n return Schechter_unnormed(M, omega, alpha=alpha) / normalise(omega,\n alpha=alpha)\n\n\ndef Mthreshold(DL, mth=27.0):\n \"\"\"\n Magnitudine assoluta di soglia\n \"\"\"\n return mth - 5.0 * np.log10(100000.0 * DL)\n\n\ndef mabs(m, DL):\n return m - 5.0 * np.log10(100000.0 * DL)\n\n\ndef HubbleLaw(D_L, omega):\n return D_L * omega.h / 3000.0\n\n\ndef gaussian(x, x0, sigma):\n return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 *\n np.pi))\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n\n def log_prior(self, x):\n if not np.isfinite(super(completeness, self).log_prior(x)):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi, DL)\n if Mthreshold(DL) < Mabsi:\n return -np.inf\n else:\n logP += np.log(Schechter(Mabsi, self.omega))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n return logP\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,\n dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.\n catalog['z']])\n return logL\n\n\nif __name__ == '__main__':\n Gal_cat = GalInABox([190, 200], [-25, -15], u.deg, u.deg, catalog='GLADE')[\n ::100]\n M = completeness(Gal_cat)\n job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)\n job.run()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#from setup_env import *\n#from mmlibrary import *\n\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\n\nfrom mmlibrary import *\n\nimport numpy as np\nimport lal\n\nfrom scipy.special import logsumexp\nimport cpnest, cpnest.model\n\n# Oggetto per test: GW170817\n#GW = SkyCoord('13h07m05.49s', '23d23m02.0s', unit=(u.hourangle, u.deg))\nDL=33.4\ndDL=3.34\n\nGW = SkyCoord(ra = '13h07m05.49s', dec = '23d23m02.0s',\n unit=('hourangle','deg'))\n\n\ndef Mstar(omega):\n '''\n Calcolo magnitudine di taglio Schechter function\n '''\n return -20.47 + 5.0*np.log10(omega.h)\n\ndef Schechter_unnormed(M, omega, alpha):\n '''\n Funzione di Schechter non normalizzata\n '''\n Ms = Mstar(omega)\n tmp = 10**(-0.4*(M-Ms))\n return tmp**(alpha+1.0)*np.exp(-tmp)\n\ndef normalise(omega, alpha, Mmin = -30,Mmax = -10):\n '''\n Normalizzazione funzione di Schechter (todo: fare analitica)\n '''\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([Schechter_unnormed(Mi, omega, alpha = alpha)*np.diff(M)[0] for Mi in M])\n\ndef Schechter(M, omega, alpha = -1.07):\n '''\n Funzione di Schechter normalizzata\n '''\n return Schechter_unnormed(M, omega, alpha = alpha)/normalise(omega, alpha = alpha)\n\ndef Mthreshold(DL, mth = 27.0):\n '''\n Magnitudine assoluta di soglia\n '''\n return mth - 5.0*np.log10(1e5*DL)\n\ndef mabs(m, DL):\n return m - 5.0*np.log10(1e5*DL)\n\n\ndef HubbleLaw(D_L, omega): # Da rivedere: test solo 1 ordine\n return D_L*omega.h/(3e3) # Sicuro del numero?\n\ndef gaussian(x,x0,sigma):\n return np.exp(-(x-x0)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names=['z', 'h', 'om', 'ol']\n self.bounds=[[0.001,0.012],\n [0.5,1.],\n [0.04,1.],\n [0.,1.]]\n self.omega = lal.CreateCosmologicalParameters(0.7,0.5,0.5,-1.,0.,0.)\n self.catalog = catalog\n\n\n def log_prior(self, x):\n # controllo finitezza e theta(M-Mth)\n\n if not(np.isfinite(super(completeness, self).log_prior(x))):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi,mi in zip(self.catalog['z'],self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi,DL)\n if Mthreshold(DL) < Mabsi:\n\n return -np.inf\n else:\n # Update parametri cosmologici con simulazione\n\n # Calcolo prior. Ciascuna coordinata è pesata con le probabilità\n # delle coordinate ('banane') GW, così come z.\n # Temporaneamente, è assunta gaussiana intorno a un evento.\n logP += np.log(Schechter(Mabsi, self.omega))\n #log_P_RA = np.log(gaussian(x['ra'],Gal.ra.rad,Gal.ra.rad/100.))\n #log_P_DEC = np.log(gaussian(x['dec'],Gal.dec.rad,Gal.dec.rad/100.))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n\n return logP\n # PROBLEMA! Come introduco le delta(ra,dec)?\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi/10.0) for zgi in self.catalog['z']])\n #logL += np.log(gaussian(x['ra'],GW.ra.rad,GW.ra.rad/10.))\n #logL += np.log(gaussian(x['dec'],GW.dec.rad,GW.dec.rad/10.))\n\n return logL\n\nif __name__ == '__main__':\n Gal_cat = GalInABox([190,200],[-25,-15], u.deg, u.deg, catalog='GLADE')[::100]\n M = completeness(Gal_cat)\n\n job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)\n job.run()\n# GLADE galaxy catalog\n",
"step-ids": [
2,
9,
13,
15,
16
]
}
|
[
2,
9,
13,
15,
16
] |
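The normalise() helper in the record above approximates the Schechter integral with a 100-point Riemann sum and carries a "todo: do it analytically" note. A minimal cross-check sketch with adaptive quadrature; scipy is an assumption here, since the original script does not import it:

from scipy.integrate import quad

def normalise_quad(omega, alpha=-1.07, Mmin=-30.0, Mmax=-10.0):
    # Integrate the unnormalized Schechter function over absolute magnitude M.
    val, _err = quad(lambda M: Schechter_unnormed(M, omega, alpha), Mmin, Mmax)
    return val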
<|reserved_special_token_0|>
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
<|reserved_special_token_0|>
getData.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
getData = GetData()
getData.main()
<|reserved_special_token_1|>
import urllib.request
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
getData = GetData()
getData.main()
<|reserved_special_token_1|>
import urllib.request
class GetData:
key = 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
url = "http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey=" + key
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open("sample.xml", "wb")
f.write(data)
f.close()
getData = GetData()
getData.main()
|
flexible
|
{
"blob_id": "58ca520a2f43cef26a95de446f9c7a82819b0b66",
"index": 833,
"step-1": "<mask token>\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\n<mask token>\ngetData.main()\n",
"step-3": "<mask token>\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\ngetData = GetData()\ngetData.main()\n",
"step-4": "import urllib.request\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\ngetData = GetData()\ngetData.main()\n",
"step-5": "import urllib.request\n\nclass GetData:\n key = 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n url = \"http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey=\" + key\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open(\"sample.xml\", \"wb\")\n f.write(data)\n f.close()\n\ngetData = GetData()\ngetData.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
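GetData.main() above writes the raw API response to sample.xml. A minimal follow-up sketch that inspects it with the standard library; the 'item' record tag is an assumption (typical for data.go.kr services), not confirmed from this response:

import xml.etree.ElementTree as ET

root = ET.parse('sample.xml').getroot()
print(root.tag)  # top-level element of the response
for item in root.iter('item'):  # 'item' is assumed, adjust to the actual schema
    print({child.tag: child.text for child in item})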
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
<|reserved_special_token_0|>
def front(self):
if not self.is_empty():
return self.container[-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
def size(self):
return len(self.container)
def front(self):
if not self.is_empty():
return self.container[-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
def size(self):
return len(self.container)
def front(self):
if not self.is_empty():
return self.container[-1]
def binary_numbers(n):
queue = Queue()
queue.enqueue('1')
for i in range(n):
front = queue.front()
print(' ', front)
queue.enqueue(front + '0')
queue.enqueue(front + '1')
queue.dequeue()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
def size(self):
return len(self.container)
def front(self):
if not self.is_empty():
return self.container[-1]
def binary_numbers(n):
queue = Queue()
queue.enqueue('1')
for i in range(n):
front = queue.front()
print(' ', front)
queue.enqueue(front + '0')
queue.enqueue(front + '1')
queue.dequeue()
if __name__ == '__main__':
binary_numbers(20)
<|reserved_special_token_1|>
from collections import deque
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
def size(self):
return len(self.container)
def front(self):
if not self.is_empty():
return self.container[-1]
def binary_numbers(n):
queue = Queue()
queue.enqueue("1")
for i in range(n):
front = queue.front()
print(" ", front)
queue.enqueue(front + "0")
queue.enqueue(front + "1")
queue.dequeue()
if __name__ == '__main__':
binary_numbers(20)
|
flexible
|
{
"blob_id": "2898506b9fd5b112f93a1ff6b010848244c398bd",
"index": 7197,
"step-1": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n <mask token>\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue('1')\n for i in range(n):\n front = queue.front()\n print(' ', front)\n queue.enqueue(front + '0')\n queue.enqueue(front + '1')\n queue.dequeue()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue('1')\n for i in range(n):\n front = queue.front()\n print(' ', front)\n queue.enqueue(front + '0')\n queue.enqueue(front + '1')\n queue.dequeue()\n\n\nif __name__ == '__main__':\n binary_numbers(20)\n",
"step-5": "from collections import deque\n\nclass Queue:\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue(\"1\")\n\n for i in range(n):\n front = queue.front()\n print(\" \", front)\n queue.enqueue(front + \"0\")\n queue.enqueue(front + \"1\")\n\n queue.dequeue()\n\n\nif __name__ == '__main__':\n binary_numbers(20)",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
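A minimal usage sketch for the record above: binary_numbers() walks the implicit binary tree breadth-first, so the first five outputs are the binary representations of 1 through 5.

binary_numbers(5)
# prints:
#   1
#   10
#   11
#   100
#   101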
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Customer', fields=[(
'phone_number', models.CharField(max_length=232, primary_key=True,
serialize=False)), ('user', models.OneToOneField(on_delete=django.
db.models.deletion.CASCADE, related_name='customer', to=settings.
AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('name', models.CharField(
max_length=232)), ('category', models.CharField(max_length=232)), (
'picture', models.ImageField(upload_to='uploads/')), ('description',
models.TextField(null=True)), ('price', models.DecimalField(
decimal_places=2, max_digits=10))]), migrations.CreateModel(name=
'DishCount', fields=[('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('count',
models.IntegerField(default=1)), ('dish', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),
migrations.CreateModel(name='Order', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (
'comment', models.TextField(max_length=232, null=True)), (
'person_count', models.IntegerField(default=1)), ('status', models.
IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (
'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',
models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,
to='main.Customer')), ('dishes', models.ManyToManyField(through=
'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=
'Restaurant', fields=[('name', models.CharField(max_length=232)), (
'description', models.TextField(max_length=232)), ('picture',
models.ImageField(upload_to='uploads/')), ('phone_number', models.
CharField(max_length=232, primary_key=True, serialize=False)), (
'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),
('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),
('dishes', models.ManyToManyField(to='main.Dish')), ('user', models
.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=
settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=
'order', name='restaurant', field=models.ForeignKey(on_delete=
django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),
migrations.AddField(model_name='dishcount', name='order', field=
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'main.Order'))]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Customer', fields=[(
'phone_number', models.CharField(max_length=232, primary_key=True,
serialize=False)), ('user', models.OneToOneField(on_delete=django.
db.models.deletion.CASCADE, related_name='customer', to=settings.
AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('name', models.CharField(
max_length=232)), ('category', models.CharField(max_length=232)), (
'picture', models.ImageField(upload_to='uploads/')), ('description',
models.TextField(null=True)), ('price', models.DecimalField(
decimal_places=2, max_digits=10))]), migrations.CreateModel(name=
'DishCount', fields=[('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('count',
models.IntegerField(default=1)), ('dish', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),
migrations.CreateModel(name='Order', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (
'comment', models.TextField(max_length=232, null=True)), (
'person_count', models.IntegerField(default=1)), ('status', models.
IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (
'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',
models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,
to='main.Customer')), ('dishes', models.ManyToManyField(through=
'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=
'Restaurant', fields=[('name', models.CharField(max_length=232)), (
'description', models.TextField(max_length=232)), ('picture',
models.ImageField(upload_to='uploads/')), ('phone_number', models.
CharField(max_length=232, primary_key=True, serialize=False)), (
'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),
('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),
('dishes', models.ManyToManyField(to='main.Dish')), ('user', models
.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=
settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=
'order', name='restaurant', field=models.ForeignKey(on_delete=
django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),
migrations.AddField(model_name='dishcount', name='order', field=
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'main.Order'))]
<|reserved_special_token_1|>
# Generated by Django 2.1.5 on 2019-01-21 22:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Dish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=232)),
('category', models.CharField(max_length=232)),
('picture', models.ImageField(upload_to='uploads/')),
('description', models.TextField(null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='DishCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=1)),
('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meal_date_time', models.DateTimeField()),
('comment', models.TextField(max_length=232, null=True)),
('person_count', models.IntegerField(default=1)),
('status', models.IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), ('READY TO MEAL', 3), ('FINISHED', 4)], default=1)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),
('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),
],
),
migrations.CreateModel(
name='Restaurant',
fields=[
('name', models.CharField(max_length=232)),
('description', models.TextField(max_length=232)),
('picture', models.ImageField(upload_to='uploads/')),
('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),
('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),
('dishes', models.ManyToManyField(to='main.Dish')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='order',
name='restaurant',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),
),
migrations.AddField(
model_name='dishcount',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),
),
]
|
flexible
|
{
"blob_id": "a6cb7a134fb8480d344743bcb7bc8766146d256f",
"index": 8238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Customer', fields=[(\n 'phone_number', models.CharField(max_length=232, primary_key=True,\n serialize=False)), ('user', models.OneToOneField(on_delete=django.\n db.models.deletion.CASCADE, related_name='customer', to=settings.\n AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('name', models.CharField(\n max_length=232)), ('category', models.CharField(max_length=232)), (\n 'picture', models.ImageField(upload_to='uploads/')), ('description',\n models.TextField(null=True)), ('price', models.DecimalField(\n decimal_places=2, max_digits=10))]), migrations.CreateModel(name=\n 'DishCount', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('count',\n models.IntegerField(default=1)), ('dish', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),\n migrations.CreateModel(name='Order', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (\n 'comment', models.TextField(max_length=232, null=True)), (\n 'person_count', models.IntegerField(default=1)), ('status', models.\n IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (\n 'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',\n models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,\n to='main.Customer')), ('dishes', models.ManyToManyField(through=\n 'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=\n 'Restaurant', fields=[('name', models.CharField(max_length=232)), (\n 'description', models.TextField(max_length=232)), ('picture',\n models.ImageField(upload_to='uploads/')), ('phone_number', models.\n CharField(max_length=232, primary_key=True, serialize=False)), (\n 'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')), ('user', models\n .OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=\n 'order', name='restaurant', field=models.ForeignKey(on_delete=\n django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),\n migrations.AddField(model_name='dishcount', name='order', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.Order'))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Customer', fields=[(\n 'phone_number', models.CharField(max_length=232, primary_key=True,\n serialize=False)), ('user', models.OneToOneField(on_delete=django.\n db.models.deletion.CASCADE, related_name='customer', to=settings.\n AUTH_USER_MODEL))]), migrations.CreateModel(name='Dish', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('name', models.CharField(\n max_length=232)), ('category', models.CharField(max_length=232)), (\n 'picture', models.ImageField(upload_to='uploads/')), ('description',\n models.TextField(null=True)), ('price', models.DecimalField(\n decimal_places=2, max_digits=10))]), migrations.CreateModel(name=\n 'DishCount', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('count',\n models.IntegerField(default=1)), ('dish', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='main.Dish'))]),\n migrations.CreateModel(name='Order', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('meal_date_time', models.DateTimeField()), (\n 'comment', models.TextField(max_length=232, null=True)), (\n 'person_count', models.IntegerField(default=1)), ('status', models.\n IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), (\n 'READY TO MEAL', 3), ('FINISHED', 4)], default=1)), ('customer',\n models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING,\n to='main.Customer')), ('dishes', models.ManyToManyField(through=\n 'main.DishCount', to='main.Dish'))]), migrations.CreateModel(name=\n 'Restaurant', fields=[('name', models.CharField(max_length=232)), (\n 'description', models.TextField(max_length=232)), ('picture',\n models.ImageField(upload_to='uploads/')), ('phone_number', models.\n CharField(max_length=232, primary_key=True, serialize=False)), (\n 'coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')), ('user', models\n .OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.AddField(model_name=\n 'order', name='restaurant', field=models.ForeignKey(on_delete=\n django.db.models.deletion.DO_NOTHING, to='main.Restaurant')),\n migrations.AddField(model_name='dishcount', name='order', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.Order'))]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-01-21 22:51\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Dish',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=232)),\n ('category', models.CharField(max_length=232)),\n ('picture', models.ImageField(upload_to='uploads/')),\n ('description', models.TextField(null=True)),\n ('price', models.DecimalField(decimal_places=2, max_digits=10)),\n ],\n ),\n migrations.CreateModel(\n name='DishCount',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('count', models.IntegerField(default=1)),\n ('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),\n ],\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('meal_date_time', models.DateTimeField()),\n ('comment', models.TextField(max_length=232, null=True)),\n ('person_count', models.IntegerField(default=1)),\n ('status', models.IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), ('READY TO MEAL', 3), ('FINISHED', 4)], default=1)),\n ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),\n ('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),\n ],\n ),\n migrations.CreateModel(\n name='Restaurant',\n fields=[\n ('name', models.CharField(max_length=232)),\n ('description', models.TextField(max_length=232)),\n ('picture', models.ImageField(upload_to='uploads/')),\n ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),\n ('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),\n ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),\n ('dishes', models.ManyToManyField(to='main.Dish')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='order',\n name='restaurant',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),\n ),\n migrations.AddField(\n model_name='dishcount',\n name='order',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
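A minimal sketch of applying the initial migration above programmatically, equivalent to running "python manage.py migrate main"; a configured DJANGO_SETTINGS_MODULE is assumed:

import django
from django.core.management import call_command

django.setup()
call_command('migrate', 'main')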
import typing
from pydantic import AnyUrl
from .base import FBObject
class MediaPayload(FBObject):
url: AnyUrl
class Coors(FBObject):
lat: float
long: float
class LocationPayload(FBObject):
coordinates: Coors
class AttachmentFallback(FBObject):
title: str
url: AnyUrl
payload: typing.Any = None
type: str = 'fallback'
class Attachment(FBObject):
type: str # template, audio, fallback, file, image, location or video
payload: typing.Union[MediaPayload, Coors, None]
|
normal
|
{
"blob_id": "1f6176e9285d810934ae745cf8759b5cd6f408c8",
"index": 8767,
"step-1": "<mask token>\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n",
"step-2": "<mask token>\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n",
"step-3": "<mask token>\n\n\nclass MediaPayload(FBObject):\n url: AnyUrl\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n",
"step-4": "import typing\nfrom pydantic import AnyUrl\nfrom .base import FBObject\n\n\nclass MediaPayload(FBObject):\n url: AnyUrl\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str\n payload: typing.Union[MediaPayload, Coors, None]\n",
"step-5": "import typing\n\nfrom pydantic import AnyUrl\n\nfrom .base import FBObject\n\n\nclass MediaPayload(FBObject):\n url: AnyUrl\n\n\nclass Coors(FBObject):\n lat: float\n long: float\n\n\nclass LocationPayload(FBObject):\n coordinates: Coors\n\n\nclass AttachmentFallback(FBObject):\n title: str\n url: AnyUrl\n payload: typing.Any = None\n type: str = 'fallback'\n\n\nclass Attachment(FBObject):\n type: str # template, audio, fallback, file, image, location or video\n payload: typing.Union[MediaPayload, Coors, None]\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
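A minimal usage sketch for the attachment models above, assuming FBObject subclasses pydantic's BaseModel; the URL and coordinates are illustrative only:

att = Attachment(type='image',
                 payload=MediaPayload(url='https://example.com/pic.png'))
print(att.payload.url)  # validated as an AnyUrl

loc = Attachment(type='location', payload=Coors(lat=52.52, long=13.405))
print(loc.payload.lat, loc.payload.long)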
<|reserved_special_token_0|>
def small_unet(pretrained_weights=False, patch_size=128):
input_ = Input((patch_size, patch_size, 1))
skips = []
output = input_
for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64,
64, 64]):
skips.append(output)
print(output.shape)
output = Conv2D(filters, (shape, shape), strides=2, padding='same',
activation='relu')(output)
for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,
16, 2]):
output = UpSampling2D()(output)
skip_output = skips.pop()
output = concatenate([output, skip_output], axis=3)
if filters != 2:
activation = 'relu'
else:
activation = 'softmax'
output = Conv2D(filters if filters != 2 else 2, (shape, shape),
activation=activation, padding='same')(output)
if filters != 2:
output = BatchNormalization(momentum=0.9)(output)
assert len(skips) == 0
m = Model([input_], [output])
if pretrained_weights:
m.load_weights(pretrained_weights)
m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[
'accuracy'])
return m
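

# A minimal shape check for the small U-Net above (TensorFlow/Keras assumed):
# seven stride-2 convolutions take a 128x128 patch down to 1x1, and seven
# upsampling blocks bring it back to a 2-channel softmax map.
m = small_unet(patch_size=128)
print(m.output_shape)  # expected: (None, 128, 128, 2)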
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def unet(pretrained_weights=None, input_size=(unet_input_image_size,
unet_input_image_size, 1)):
inputs = Input(input_size)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=
'binary_crossentropy', metrics=['accuracy'])
if pretrained_weights:
model.load_weights(pretrained_weights)
return model
def small_unet(pretrained_weights=False, patch_size=128):
input_ = Input((patch_size, patch_size, 1))
skips = []
output = input_
for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64,
64, 64]):
skips.append(output)
print(output.shape)
output = Conv2D(filters, (shape, shape), strides=2, padding='same',
activation='relu')(output)
    for shape, filters in zip([4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,
        16, 2]):
        output = UpSampling2D()(output)
        skip_output = skips.pop()
        output = concatenate([output, skip_output], axis=3)
        if filters != 2:
            activation = 'relu'
        else:
            activation = 'softmax'
        output = Conv2D(filters, (shape, shape), activation=activation,
            padding='same')(output)
        if filters != 2:
            output = BatchNormalization(momentum=0.9)(output)
assert len(skips) == 0
m = Model([input_], [output])
if pretrained_weights:
m.load_weights(pretrained_weights)
m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[
'accuracy'])
return m
<|reserved_special_token_1|>
<|reserved_special_token_0|>
unet_feature_n = 512  # widest channel count, used at the bottleneck
unet_feature_nstep_size = 0.0001  # Adam learning rate, despite the name
unet_input_image_size = 128
def unet(pretrained_weights=None, input_size=(unet_input_image_size,
unet_input_image_size, 1)):
inputs = Input(input_size)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=
'binary_crossentropy', metrics=['accuracy'])
if pretrained_weights:
model.load_weights(pretrained_weights)
return model
def small_unet(pretrained_weights=False, patch_size=128):
input_ = Input((patch_size, patch_size, 1))
skips = []
output = input_
for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64,
64, 64]):
skips.append(output)
print(output.shape)
output = Conv2D(filters, (shape, shape), strides=2, padding='same',
activation='relu')(output)
    for shape, filters in zip([4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,
        16, 2]):
        output = UpSampling2D()(output)
        skip_output = skips.pop()
        output = concatenate([output, skip_output], axis=3)
        if filters != 2:
            activation = 'relu'
        else:
            activation = 'softmax'
        output = Conv2D(filters, (shape, shape), activation=activation,
            padding='same')(output)
        if filters != 2:
            output = BatchNormalization(momentum=0.9)(output)
assert len(skips) == 0
m = Model([input_], [output])
if pretrained_weights:
m.load_weights(pretrained_weights)
m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[
'accuracy'])
return m
<|reserved_special_token_1|>
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
unet_feature_n = 512
unet_feature_nstep_size = 0.0001
unet_input_image_size = 128
def unet(pretrained_weights=None, input_size=(unet_input_image_size,
unet_input_image_size, 1)):
inputs = Input(input_size)
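    # Contracting path: two 3x3 convs per level; channels double while resolution halves via 2x2 max-pooling.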
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
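    # Bottleneck at 1/16 input resolution with the widest feature maps (unet_feature_n channels).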
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
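    # Expanding path: upsample 2x, halve the channels with a 2x2 conv, then concatenate the matching encoder feature map before two 3x3 convs.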
up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',
kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=
'same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=
'binary_crossentropy', metrics=['accuracy'])
if pretrained_weights:
model.load_weights(pretrained_weights)
return model
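# A minimal training sketch (illustrative only; `train_images`/`train_masks` are assumed
# float arrays of shape (N, 128, 128, 1) scaled to [0, 1], not names from this file):
#   model = unet()
#   model.fit(train_images, train_masks, batch_size=8, epochs=10)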
def small_unet(pretrained_weights=False, patch_size=128):
input_ = Input((patch_size, patch_size, 1))
skips = []
output = input_
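    # Each encoder step stores the pre-conv tensor as a skip connection, then downsamples 2x with a strided conv.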
for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64,
64, 64]):
skips.append(output)
print(output.shape)
output = Conv2D(filters, (shape, shape), strides=2, padding='same',
activation='relu')(output)
    for shape, filters in zip([4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,
        16, 2]):
        output = UpSampling2D()(output)
        skip_output = skips.pop()
        output = concatenate([output, skip_output], axis=3)
        if filters != 2:
            activation = 'relu'
        else:
            activation = 'softmax'
        output = Conv2D(filters, (shape, shape), activation=activation,
            padding='same')(output)
        if filters != 2:
            output = BatchNormalization(momentum=0.9)(output)
assert len(skips) == 0
m = Model([input_], [output])
if pretrained_weights:
m.load_weights(pretrained_weights)
m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[
'accuracy'])
return m
<|reserved_special_token_1|>
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
unet_feature_n = 512
unet_feature_nstep_size = 1e-4
unet_input_image_size = 128
def unet(pretrained_weights=None, input_size=(unet_input_image_size, unet_input_image_size, 1)):
inputs = Input(input_size)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss='binary_crossentropy', metrics=['accuracy'])
    if pretrained_weights:
model.load_weights(pretrained_weights)
return model
def small_unet(pretrained_weights=False, patch_size=128):
input_ = Input((patch_size, patch_size, 1))
skips = []
output = input_
for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, 64, 64]):
skips.append(output)
print(output.shape)
        output = Conv2D(filters, (shape, shape), strides=2, padding="same", activation="relu")(output)
#output = BatchNormalization()(output)
#if shape != 7:
# output = BatchNormalization()(output)
    for shape, filters in zip([4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32, 16, 2]):
        output = UpSampling2D()(output)

        skip_output = skips.pop()
        output = concatenate([output, skip_output], axis=3)

        if filters != 2:
            activation = "relu"
        else:
            activation = "softmax"
        output = Conv2D(filters, (shape, shape), activation=activation, padding="same")(output)

        if filters != 2:
            output = BatchNormalization(momentum=.9)(output)
assert len(skips) == 0
m = Model([input_], [output])
if pretrained_weights:
m.load_weights(pretrained_weights)
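    # Note: the 2-channel softmax head is trained with binary_crossentropy here;
    # categorical_crossentropy (or a 1-channel sigmoid head) would be the more conventional pairing.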
m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
return m
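# Illustrative usage sketch (the `patches` array is an assumption, not a name from this file):
#   m = small_unet(patch_size=128)
#   masks = m.predict(patches)  # -> (N, 128, 128, 2) softmax maps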
|
flexible
|
{
"blob_id": "b8d45a0028cb4e393ddca9dd6d246289328d1791",
"index": 4044,
"step-1": "<mask token>\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-2": "<mask token>\n\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size,\n unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv6)\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv7)\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv8)\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n model = Model(inputs=inputs, outputs=conv10)\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=\n 
'binary_crossentropy', metrics=['accuracy'])\n if pretrained_weights:\n model.load_weights(pretrained_weights)\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-3": "<mask token>\nunet_feature_n = 512\nunet_feature_nstep_size = 0.0001\nunet_input_image_size = 128\n\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size,\n unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv6)\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv7)\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv8)\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n model = Model(inputs=inputs, 
outputs=conv10)\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=\n 'binary_crossentropy', metrics=['accuracy'])\n if pretrained_weights:\n model.load_weights(pretrained_weights)\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-4": "from keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as keras\nunet_feature_n = 512\nunet_feature_nstep_size = 0.0001\nunet_input_image_size = 128\n\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size,\n unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv6)\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv7)\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv8)\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv9)\n conv9 = 
Conv2D(2, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n model = Model(inputs=inputs, outputs=conv10)\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=\n 'binary_crossentropy', metrics=['accuracy'])\n if pretrained_weights:\n model.load_weights(pretrained_weights)\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-5": "from keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as keras\n\nunet_feature_n = 512\nunet_feature_nstep_size = 1e-4\nunet_input_image_size = 128\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size, unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\n\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\n\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\n\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', 
padding='same', kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n\n model = Model(inputs=inputs, outputs=conv10)\n\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss='binary_crossentropy', metrics=['accuracy'])\n\n if (pretrained_weights):\n model.load_weights(pretrained_weights)\n\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, 64, 64]):\n skips.append(output)\n print(output.shape)\n output= Conv2D(filters, (shape, shape), strides=2, padding=\"same\", activation=\"relu\")(output)\n #output = BatchNormalization()(output)\n #if shape != 7:\n # output = BatchNormalization()(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64,32, 16, 2]):\n output = UpSampling2D()(output)\n\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n\n if filters != 2:\n activation = \"relu\"\n else:\n activation = \"softmax\"\n output = Conv2D(filters if filters != 2 else 2, (shape, shape), activation=activation, padding=\"same\")(output)\n \n if filters != 2:\n output = BatchNormalization(momentum=.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])\n return m",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class searchContactView(APIView):
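    # Teacher-side contact lookup: iden 0 = teacher found, 1 = student found, 3 = searching yourself, 4 = no such user.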
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '0' and user_id == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuGetPrivateLetterListsView(APIView):
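    # Returns every chat box the student has open; each box's sent and received messages are merged into one chronological thread.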
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
username = payload['username']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id
) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
                if item.receiverStu_id is not None:
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverStu_id=item.
receiverStu_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=item.receiverStu_id) & Q(
receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
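                # Merge the sent (msgList1) and received (msgList2) messages by timestamp; both lists are already chronological.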
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0, len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2 += 1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1 += 1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1 += 1
else:
msgList.append(msgList2[i2])
i2 += 1
data = {'id': item.id, 'receiver': receiver, 'msgList':
msgList, 'name': receiver + str(identity), 'identity':
identity}
data_list.append(data)
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data_list})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuEnterPrivateLetterView(APIView):
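    # Saves a private message from the student and ensures the recipient has an open chat box back to the sender.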
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverTea_id=receiver, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=receiver) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,
receiverStu_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverStu_id=receiverStu_id, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=receiverStu_id) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=
receiverStu_id, receiverStu_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
            return Response({'status': 200, 'msg': 'Private message sent successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuRecentContactsView(APIView):
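    # Lists the student's recent contacts, deduplicated so each contact appears once, most recent exchange first.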
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
                if item.receiverTea_id is not None and item.receiverTea_id != '':
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
                if item.senderTea_id is not None and item.senderTea_id != '':
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
lenData = len(data_list)
            seen = {}
            data_list1 = []
            # Walk newest-to-oldest so only each contact's most recent entry is kept.
            for i in range(lenData - 1, -1, -1):
                if data_list[i]['receiver'] + str(data_list[i]['identity']
                        ) not in seen:
                    seen[data_list[i]['receiver'] + str(data_list[i][
                        'identity'])] = '1'
                    data_list1.append(data_list[i])
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data_list1})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuCloseChatBoxView(APIView):
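    # Deletes the student's chat box with the given teacher (iden 0) or student (iden 1).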
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
            return Response({'status': 200, 'msg': 'Returned successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuOpenChatBoxView(APIView):
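    # Opens the student's chat box with the given contact, creating it if it does not exist yet.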
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
            return Response({'status': 200, 'msg': 'Returned successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuSearchContactView(APIView):
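    # Student-side contact lookup: iden 0 = teacher found, 1 = student found, 3 = searching yourself, 4 = no such user.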
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class closeChatBoxView(APIView):
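    # Teacher-side counterpart of stuCloseChatBoxView: deletes the teacher's chat box with the given contact.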
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
            return Response({'status': 200, 'msg': 'Returned successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class openChatBoxView(APIView):
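    # Teacher-side counterpart of stuOpenChatBoxView: opens (or creates) the teacher's chat box with the given contact.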
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
            return Response({'status': 200, 'msg': 'Returned successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class searchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '0' and user_id == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuGetPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
username = payload['username']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id
) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
                if item.receiverStu_id is not None:
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverStu_id=item.
receiverStu_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=item.receiverStu_id) & Q(
receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0, len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2 += 1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1 += 1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1 += 1
else:
msgList.append(msgList2[i2])
i2 += 1
data = {'id': item.id, 'receiver': receiver, 'msgList':
msgList, 'name': receiver + str(identity), 'identity':
identity}
data_list.append(data)
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data_list})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuEnterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverTea_id=receiver, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=receiver) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,
receiverStu_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverStu_id=receiverStu_id, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=receiverStu_id) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=
receiverStu_id, receiverStu_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
            return Response({'status': 200, 'msg': 'Private message sent successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
                if item.receiverTea_id is not None and item.receiverTea_id != '':
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
                if item.senderTea_id is not None and item.senderTea_id != '':
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
lenData = len(data_list)
            seen = {}
            data_list1 = []
            # Walk newest-to-oldest so only each contact's most recent entry is kept.
            for i in range(lenData - 1, -1, -1):
                if data_list[i]['receiver'] + str(data_list[i]['identity']
                        ) not in seen:
                    seen[data_list[i]['receiver'] + str(data_list[i][
                        'identity'])] = '1'
                    data_list1.append(data_list[i])
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data_list1})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuCloseChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
            return Response({'status': 200, 'msg': 'Returned successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuOpenChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
            return Response({'status': 200, 'msg': 'Returned successfully'})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
class stuSearchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
            return Response({'status': 200, 'msg': 'Returned successfully', 'data': data})
        except Exception as e:
            return Response({'status': 204, 'msg': 'Encountered an unexpected error', 'err': e.args})
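# A hypothetical wiring sketch (the urls.py module, import, and URL paths are assumptions, not from this source):
#   from django.urls import path
#   urlpatterns = [
#       path('letter/contact/search/', searchContactView.as_view()),
#       path('letter/stu/send/', stuEnterPrivateLetterView.as_view()),
#   ]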
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class enterPrivateLetterView(APIView):
<|reserved_special_token_0|>
class getRecentContactsView(APIView):
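    # Teacher-side counterpart of stuRecentContactsView: recent contacts, deduplicated, newest first.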
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
                return Response({'status': 403, 'msg': 'Not logged in', 'err': e.args})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderTea_id=user_id):
                if item.receiverTea_id is not None and item.receiverTea_id != '':
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverTea_id=user_id):
                if item.senderTea_id is not None and item.senderTea_id != '':
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
lenData = len(data_list)
            seen = {}
            data_list1 = []
            # Walk newest-to-oldest so only each contact's most recent entry is kept.
            for i in range(lenData - 1, -1, -1):
                if data_list[i]['receiver'] + str(data_list[i]['identity']
                        ) not in seen:
                    seen[data_list[i]['receiver'] + str(data_list[i][
                        'identity'])] = '1'
                    data_list1.append(data_list[i])
return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class closeChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class openChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class searchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '0' and user_id == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
return Response({'status': 200, 'msg': '返回成功', 'data': data})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuGetPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
username = payload['username']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id
) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverStu_id=item.
receiverStu_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=item.receiverStu_id) & Q(
receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0, len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2 += 1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1 += 1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1 += 1
else:
msgList.append(msgList2[i2])
i2 += 1
data = {'id': item.id, 'receiver': receiver, 'msgList':
msgList, 'name': receiver + str(identity), 'identity':
identity}
data_list.append(data)
return Response({'status': 200, 'msg': '返回成功', 'data': data_list})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuEnterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverTea_id=receiver, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=receiver) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,
receiverStu_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverStu_id=receiverStu_id, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=receiverStu_id) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=
receiverStu_id, receiverStu_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '发布私信成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != '':
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
if item.senderTea_id != None and item.senderTea_id != '':
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
for i in range(lenData - 1, -1, -1):
if data_list[i]['receiver'] + str(data_list[i]['identity']
) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i][
'identity'])] = '1'
data_list1.append(data_list[i])
return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuCloseChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuOpenChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuSearchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
return Response({'status': 200, 'msg': '返回成功', 'data': data})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from ex.models import Teacher, Student, Group, Report, TeamEvaluation, PrivateLetter, ChatBoxIsOpen
from django.core import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth.hashers import make_password, check_password
import os
from ex.utils.jwt_auth import create_token, get_user_id
from ex.utils.extensions.auth import JwtQueryParamAuthentication
from django.db.models import Q
class getPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id
) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=user_id) & Q(receiverStu_id=item.
receiverStu_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=item.receiverStu_id) & Q(
receiverTea_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=user_id) & Q(receiverTea_id=receiver)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=receiver) & Q(receiverTea_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0, len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2 += 1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1 += 1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1 += 1
else:
msgList.append(msgList2[i2])
i2 += 1
data = {'id': item.id, 'receiver': receiver, 'msgList':
msgList, 'name': receiver + str(identity), 'identity':
identity}
data_list.append(data)
return Response({'status': 200, 'msg': '返回成功', 'data': data_list})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class enterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderTea_id=user_id,
receiverTea_id=receiver, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=receiver) & Q(receiverTea_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,
receiverTea_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
privateLetter = PrivateLetter(senderTea_id=user_id,
receiverStu_id=receiverStu_id, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=receiverStu_id) & Q(receiverTea_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=
receiverStu_id, receiverTea_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '发布私信成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class getRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderTea_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != '':
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverTea_id=user_id):
if item.senderTea_id != None and item.senderTea_id != '':
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
for i in range(lenData - 1, -1, -1):
if data_list[i]['receiver'] + str(data_list[i]['identity']
) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i][
'identity'])] = '1'
data_list1.append(data_list[i])
return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class closeChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class openChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class searchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '0' and user_id == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
return Response({'status': 200, 'msg': '返回成功', 'data': data})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuGetPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
username = payload['username']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id
) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverStu_id=item.
receiverStu_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=item.receiverStu_id) & Q(
receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(
senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 1}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(
senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {'id': item2.id, 'message': item2.message,
'time': str(item2.time.strftime(
'%Y-%m-%d %H:%M:%S')), 'new': item2.new,
'Ienter': 2}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0, len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2 += 1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1 += 1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1 += 1
else:
msgList.append(msgList2[i2])
i2 += 1
data = {'id': item.id, 'receiver': receiver, 'msgList':
msgList, 'name': receiver + str(identity), 'identity':
identity}
data_list.append(data)
return Response({'status': 200, 'msg': '返回成功', 'data': data_list})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuEnterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverTea_id=receiver, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id
=receiver) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,
receiverStu_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
privateLetter = PrivateLetter(senderStu_id=user_id,
receiverStu_id=receiverStu_id, message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=receiverStu_id) & Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=
receiverStu_id, receiverStu_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '发布私信成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != '':
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
if item.senderTea_id != None and item.senderTea_id != '':
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id
).first().stu_num
data = {'receiver': receiver, 'identity': identity}
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
for i in range(lenData - 1, -1, -1):
if data_list[i]['receiver'] + str(data_list[i]['identity']
) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i][
'identity'])] = '1'
data_list1.append(data_list[i])
return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuCloseChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=Student.objects.filter(
stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuOpenChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver
).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id
=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,
receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({'status': 200, 'msg': '返回成功'})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
class stuSearchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self,
request)[0]
except Exception as e:
return Response({'status': 403, 'msg': '未登录', 'err': e.args})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {'identity': iden, 'receiver': receiver}
return Response({'status': 200, 'msg': '返回成功', 'data': data})
except Exception as e:
return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from ex.models import Teacher, Student, Group, Report, TeamEvaluation, PrivateLetter, ChatBoxIsOpen
from django.core import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth.hashers import make_password, check_password
import os
from ex.utils.jwt_auth import create_token, get_user_id
from ex.utils.extensions.auth import JwtQueryParamAuthentication
from django.db.models import Q
# Create your views here.
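# Conventions observed throughout this module (inferred from the code below,
# not from external documentation):
#   - 'identity' / 'iden' codes: 0 = teacher, 1 = student, 3 = the user
#     themselves, 4 = user not found.
#   - Response envelope: {'status': 200} on success, 204 on a caught
#     exception, 403 when JWT query-parameter authentication fails.
#   - Teachers are addressed by Teacher.id, students by Student.stu_num.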
class getPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
                if item.receiverStu_id is not None:
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 1  # sent by the current user
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverTea_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 2  # received from the contact
}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 1  # sent by the current user
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverTea_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 2  # received from the contact
}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
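                # Two-pointer merge: msgList1 and msgList2 are each already in
                # chronological order, so this interleaves them into one sorted list.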
                for i in range(0, len1 + len2):
                    if i1 >= len1:
                        msgList.append(msgList2[i2])
                        i2 += 1
                    elif i2 >= len2:
                        msgList.append(msgList1[i1])
                        i1 += 1
                    elif msgList1[i1]['time'] < msgList2[i2]['time']:
                        msgList.append(msgList1[i1])
                        i1 += 1
                    else:
                        msgList.append(msgList2[i2])
                        i2 += 1
data = {
'id': item.id,
'receiver': receiver,
'msgList': msgList,
'name': receiver + str(identity),
'identity': identity
}
data_list.append(data)
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
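

# Illustrative only: a minimal equivalent of the two-pointer merges used in
# the list views above. It is not referenced by any view; it assumes, as the
# loops do, that both inputs are already sorted by their 'time' strings
# (the '%Y-%m-%d %H:%M:%S' format sorts lexicographically in time order).
def _merge_message_lists(sent, received):
    return sorted(sent + received, key=lambda msg: msg['time'])
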
class enterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
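            # identity of the receiver: 0 = teacher, 1 = student. Sending a
            # letter also (re)creates the receiver's ChatBoxIsOpen row so the
            # conversation appears on their side.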
            if identity == 0:
                privateLetter = PrivateLetter(senderTea_id=user_id, receiverTea_id=receiver, message=message)
                chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver) & Q(receiverTea_id=user_id)).first()
                if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver, receiverTea_id=user_id)
            else:
                receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
                privateLetter = PrivateLetter(senderTea_id=user_id, receiverStu_id=receiverStu_id, message=message)
                chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id) & Q(receiverTea_id=user_id)).first()
                if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id, receiverTea_id=user_id)
            privateLetter.save()
            chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '发布私信成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Get the user's recent contacts (teacher side)
class getRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderTea_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != "":
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
                data = {
                    'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
                }
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverTea_id=user_id):
if item.senderTea_id != None and item.senderTea_id != "":
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num
                data = {
                    'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
                }
data_list.append(data)
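            # Deduplicate while keeping only the most recent occurrence of
            # each (receiver, identity) pair.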
            seen = set()
            data_list1 = []
            for i in range(len(data_list) - 1, -1, -1):
                key = data_list[i]['receiver'] + str(data_list[i]['identity'])
                if key not in seen:
                    seen.add(key)
                    data_list1.append(data_list[i])
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list1
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Close a chat box (teacher side)
class closeChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()
else:
                receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
                chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
chatBoxIsOpen.delete()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Open a chat box (teacher side)
class openChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id, receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id, receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Search for a contact (teacher side)
class searchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
            # iden codes: 0 = teacher, 1 = student, 3 = self, 4 = user not found
            iden = 4
            # request.GET values are strings, so compare ids as strings
            if identity == '0' and str(user_id) == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {
'identity': iden,
'receiver': receiver
}
return Response({
'status': 200,
'msg': '返回成功',
'data': data
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class stuGetPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
                if item.receiverStu_id is not None:
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 1  # sent by the current user
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverStu_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 2  # received from the contact
}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 1  # sent by the current user
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                            'Ienter': 2  # received from the contact
}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
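                # Two-pointer merge: msgList1 and msgList2 are each already in
                # chronological order, so this interleaves them into one sorted list.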
                for i in range(0, len1 + len2):
                    if i1 >= len1:
                        msgList.append(msgList2[i2])
                        i2 += 1
                    elif i2 >= len2:
                        msgList.append(msgList1[i1])
                        i1 += 1
                    elif msgList1[i1]['time'] < msgList2[i2]['time']:
                        msgList.append(msgList1[i1])
                        i1 += 1
                    else:
                        msgList.append(msgList2[i2])
                        i2 += 1
data = {
'id': item.id,
'receiver': receiver,
'msgList': msgList,
'name': receiver + str(identity),
'identity': identity
}
data_list.append(data)
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class stuEnterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
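            # identity of the receiver: 0 = teacher, 1 = student. As in the
            # teacher-side view above, sending also (re)creates the receiver's
            # ChatBoxIsOpen row.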
            if identity == 0:
                privateLetter = PrivateLetter(senderStu_id=user_id, receiverTea_id=receiver, message=message)
                chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver) & Q(receiverStu_id=user_id)).first()
                if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver, receiverStu_id=user_id)
            else:
                receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
                privateLetter = PrivateLetter(senderStu_id=user_id, receiverStu_id=receiverStu_id, message=message)
                chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id) & Q(receiverStu_id=user_id)).first()
                if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id, receiverStu_id=user_id)
            privateLetter.save()
            chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '发布私信成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Get the user's recent contacts (student side)
class stuRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != "":
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
data = {
'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
if item.senderTea_id != None and item.senderTea_id != "":
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num
data = {
'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
}
data_list.append(data)
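            # Deduplicate while keeping only the most recent occurrence of
            # each (receiver, identity) pair.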
            seen = set()
            data_list1 = []
            for i in range(len(data_list) - 1, -1, -1):
                key = data_list[i]['receiver'] + str(data_list[i]['identity'])
                if key not in seen:
                    seen.add(key)
                    data_list1.append(data_list[i])
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list1
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Close a chat box (student side)
class stuCloseChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()
else:
                receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
                chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
chatBoxIsOpen.delete()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Open a chat box (student side)
class stuOpenChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id, receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
                    chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id, receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Search for a contact (student side)
class stuSearchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
            # iden codes: 0 = teacher, 1 = student, 2 = not searched yet,
            # 3 = self, 4 = user not found
            iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {
'identity': iden,
'receiver': receiver
}
return Response({
'status': 200,
'msg': '返回成功',
'data': data
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
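

# Example usage sketch (hypothetical wiring; the project's urls.py is not
# part of this module). Assuming searchContactView is routed at
# '/api/searchContact/' and the JWT is passed as a query parameter named
# 'token', as JwtQueryParamAuthentication's name suggests, a call from
# Django's test client could look like:
#
#     from django.test import Client
#     client = Client()
#     resp = client.get('/api/searchContact/', {
#         'receiver': 'T001',   # hypothetical teacher id
#         'identity': '0',      # searching for a teacher
#         'token': jwt_token,   # token obtained at login
#     })
#     # resp.json() -> {'status': 200, 'msg': '返回成功', 'data': {...}}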
"step-3": "<mask token>\n\n\nclass enterPrivateLetterView(APIView):\n <mask token>\n\n\nclass getRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderTea_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverTea_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass closeChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass openChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass searchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n 
payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuGetPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuEnterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver 
= request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuCloseChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuOpenChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return 
Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuSearchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom ex.models import Teacher, Student, Group, Report, TeamEvaluation, PrivateLetter, ChatBoxIsOpen\nfrom django.core import serializers\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth.hashers import make_password, check_password\nimport os\nfrom ex.utils.jwt_auth import create_token, get_user_id\nfrom ex.utils.extensions.auth import JwtQueryParamAuthentication\nfrom django.db.models import Q\n\n\nclass getPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverTea_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverTea_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass enterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderTea_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = 
ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverTea_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderTea_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverTea_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass getRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderTea_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverTea_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass closeChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass openChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n 
=user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass searchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuGetPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n 
msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuEnterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuCloseChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n 
receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuOpenChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuSearchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponse,JsonResponse\nfrom ex.models import Teacher,Student,Group,Report,TeamEvaluation,PrivateLetter,ChatBoxIsOpen\nfrom django.core import serializers\n\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth.hashers import make_password, check_password\n# from plane.models import User, Student, LightList, Light, Score, Visit\n# from plane.utils.jwt_auth import create_token, get_user_id\n# from django.contrib.auth.hashers import make_password, check_password\n\n# from rest_framework.authtoken.models import Token\n# from django.contrib.auth import authenticate\n\nimport os\n\nfrom ex.utils.jwt_auth import create_token, get_user_id\n\nfrom ex.utils.extensions.auth import JwtQueryParamAuthentication\n\nfrom django.db.models import Q\n\n# Create your views here.\n\nclass getPrivateLetterListsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n \n user_id = payload['id']\n \n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverTea_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n # print(len(msgList1))\n else:\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverTea_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0,len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2+=1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1+=1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1+=1\n else:\n msgList.append(msgList2[i2])\n i2+=1\n\n # print(msgList)\n data = {\n 'id': item.id,\n 'receiver': receiver,\n 'msgList': msgList,\n 'name': receiver + str(identity),\n 'identity': identity\n }\n data_list.append(data)\n # print(data_list)\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\nclass enterPrivateLetterView(APIView):\n def post(self, request, *args, 
**kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n\n if identity == 0:\n privateLetter = PrivateLetter(senderTea_id=user_id,receiverTea_id=receiver,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverTea_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n privateLetter = PrivateLetter(senderTea_id=user_id,receiverStu_id=receiverStu_id,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverTea_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '发布私信成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 获取最近联系人\nclass getRecentContactsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n \n data_list = []\n for item in PrivateLetter.objects.filter(senderTea_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != \"\":\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n # print(((receiver + str(identity)) not in dict))\n # if (receiver + str(identity)) not in dict:\n # dict[receiver + str(identity)] = '1'\n data = {\n # 'id': item.id,\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverTea_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != \"\":\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num\n # print(((receiver + str(identity)) not in dict))\n # if (receiver + str(identity)) not in dict:\n # dict[receiver + str(identity)] = '1'\n data = {\n # 'id': item.id,\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1,-1,-1):\n if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'\n data_list1.append(data_list[i])\n\n # lenData = len(data_list1)\n # if lenData > 10:\n # data_list1 = data_list1[lenData - 10:lenData]\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list1\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 关闭聊天框\nclass closeChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = 
request.data.get('receiver')\n iden = request.data.get('iden')\n \n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 打开聊天框\nclass openChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n \n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 搜索联系人\nclass searchContactView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n # print(receiver,identity=='0')\n\n # user = Teacher.objects.filter(id=username).first()\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {\n 'identity': iden,\n 'receiver': receiver\n }\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\nclass stuGetPrivateLetterListsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n user_id = payload['id']\n username = payload['username']\n # print(user_id,username)\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 
'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverStu_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n # print(len(msgList1))\n else:\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0,len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2+=1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1+=1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1+=1\n else:\n msgList.append(msgList2[i2])\n i2+=1\n\n # print(msgList)\n data = {\n 'id': item.id,\n 'receiver': receiver,\n 'msgList': msgList,\n 'name': receiver + str(identity),\n 'identity': identity\n }\n data_list.append(data)\n # print(data_list)\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\nclass stuEnterPrivateLetterView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n username = payload['username']\n # print(user_id,username)\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,receiverTea_id=receiver,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,receiverStu_id=receiverStu_id,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '发布私信成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 获取最近联系人\nclass stuRecentContactsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n \n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if 
item.receiverTea_id != None and item.receiverTea_id != \"\":\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n data = {\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != \"\":\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num\n data = {\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1,-1,-1):\n if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'\n data_list1.append(data_list[i])\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list1\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 关闭聊天框\nclass stuCloseChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n \n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 打开聊天框\nclass stuOpenChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n \n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 搜索联系人\nclass stuSearchContactView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n # print(receiver,identity=='0')\n\n # 
user = Teacher.objects.filter(id=username).first()\n # 0:教师,1:学生,2:还未搜索,3:自己,4:用户不存在\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {\n 'identity': iden,\n 'receiver': receiver\n }\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n",
"step-ids": [
14,
18,
21,
25,
26
]
}
|
[
14,
18,
21,
25,
26
] |
from random import randint
import matplotlib.pyplot as plt
def generate_list(length: int) -> list:
"""Generate a list with given length with random integer values in the interval [0, length]
Args:
length (int): List length
Returns:
list: List generated with random values
"""
return [randint(0, length + 1) for _ in range(length)]
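
# Example: generate_list(5) might return [3, 0, 6, 2, 4];
# random.randint is inclusive at both ends, so values fall in [0, length + 1].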
def plot_table(timestamps: dict, threadList: list, mList: list) -> None:
    """Plot execution time by number of processes

    Args:
        timestamps (dict): Mapping from process count to execution time in seconds
        threadList (list): Numbers of processes used (x-axis)
        mList (list): Total numbers of values processed, shown in the legend
    """
    # list() is required: matplotlib does not accept a dict_values view directly
    plt.plot(threadList, list(timestamps.values()), 'o-')
    plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))
plt.xlabel('Número de processos')
plt.ylabel('Tempo de Execução (s)')
plt.title('Tempo de Execução por Total de Processos e Valores')
plt.show()
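
# Usage sketch (illustrative numbers, not from the original benchmark):
# each key of `timestamps` is a process count, each value a runtime in seconds.
if __name__ == '__main__':
    sample_times = {1: 4.2, 2: 2.3, 4: 1.4, 8: 1.1}
    plot_table(sample_times, threadList=[1, 2, 4, 8], mList=[10 ** 6])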
|
normal
|
{
"blob_id": "8804bfc5bed8b93e50279f0cbab561fe09d92a64",
"index": 6522,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) ->None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(\n 0.5, 0.0, 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n",
"step-3": "<mask token>\n\n\ndef generate_list(length: int) ->list:\n \"\"\"Generate a list with given length with random integer values in the interval [0, length]\n\n Args:\n length (int): List length\n\n Returns:\n list: List generated with random values\n \"\"\"\n return [randint(0, length + 1) for _ in range(length)]\n\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) ->None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(\n 0.5, 0.0, 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n",
"step-4": "from random import randint\nimport matplotlib.pyplot as plt\n\n\ndef generate_list(length: int) ->list:\n \"\"\"Generate a list with given length with random integer values in the interval [0, length]\n\n Args:\n length (int): List length\n\n Returns:\n list: List generated with random values\n \"\"\"\n return [randint(0, length + 1) for _ in range(length)]\n\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) ->None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(\n 0.5, 0.0, 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n",
"step-5": "from random import randint\nimport matplotlib.pyplot as plt\n\ndef generate_list(length: int) -> list:\n \"\"\"Generate a list with given length with random integer values in the interval [0, length]\n\n Args:\n length (int): List length\n\n Returns:\n list: List generated with random values\n \"\"\"\n\n return [randint(0, length + 1) for _ in range(length)]\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from mpi4py import MPI
import matplotlib
from tmm import coh_tmm
import pandas as pd
import os
from numpy import pi
from scipy.interpolate import interp1d
from joblib import Parallel, delayed
import numpy as np
import glob
import matplotlib.pyplot as plt
import pickle as pkl
import seaborn as sns
from scipy.optimize import minimize
import json
from tqdm import tqdm
DATABASE = './data'
INSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']
METALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']
num_workers = 8
def cal_reward(R, T, A, target):
    '''
    Calculate the reward for a given set of spectra.
    The reward is the mean over the targeted spectra of (1 - mean absolute error).

    Args:
        R, T, A: numpy array. Reflection, transmission, and
            absorption spectra, respectively.
        target: dict. {'R':np.array, 'T':np.array, 'A':np.array}

    Returns:
        reward: float. Reward for the spectrum.
    '''
reward = 0
for k, v in target.items():
if k == 'R':
res = R
elif k == 'T':
res = T
else:
res = A
reward += 1 - np.abs(res.squeeze() - v).mean()
reward /= len(target)
return reward
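
# Usage sketch (illustrative arrays): keys absent from `target` are ignored.
# With target = {'R': np.ones(10)} and a flat R = 0.9, the reward is
# 1 - mean|0.9 - 1| = 0.9 (up to float rounding):
# >>> cal_reward(np.full(10, 0.9), np.zeros(10), np.zeros(10), {'R': np.ones(10)})
# 0.9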
class Memory:
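    # Plain rollout buffer: stores one batch of trajectories (states, actions,
    # log-probs, rewards, terminal flags) collected by the RL agent.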
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
def batch_spectrum(env, names_list, thickness_list):
def spectrum(args):
'''
Inputs:
            1. names: list of lists, each inner list corresponds to one structure
2. thickness: list of lists
'''
names, thickness = args
R, T, A = env.spectrum(names, thickness, 0, False)
return R, T, A
res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)
for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
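# Illustrative call (a sketch; `env` is a TMM_sim instance and the material
# choices below are hypothetical picks from its database):
#     Rs, Ts, As = batch_spectrum(env,
#                                 [['Ge'], ['SiO2', 'Ag']],
#                                 [[np.inf, 50, np.inf], [np.inf, 90, 20, np.inf]])
#     # each result has shape (2, len(env.wavelength))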
def merge_layers(categories, thicknesses):
'''
Merges consecutive layers with the same material types.
'''
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
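# Worked example (illustrative values only): two consecutive 'SiO2' entries
# collapse into one 80 nm layer, and the semi-infinite boundaries are re-added.
#     merge_layers(['SiO2', 'SiO2', 'Ag'], [np.inf, 50, 30, 20, np.inf])
#     # -> (['SiO2', 'Ag'], [inf, 80, 20, inf])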
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
'''
    Given categories and values, return the structure in the form
    (name (str), thickness (nm))
    '''
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(
min(max(15, int(value * max_value//2)), max_value))
elif name in METALS:
thickness.append(
min(max(5, int(value * max_value//2)), max_value))
elif name in INSULATORS:
thickness.append(
min(max(1, int(value * max_value//2)), max_value))
else:
raise ValueError('Material not known')
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
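# Sketch of the discrete mode (materials/values below are made up): each value
# indexes the thickness table `ds`; in continuous mode each value in [0, 1] is
# instead scaled to nanometres and clipped per material class.
#     get_structure([0, 1], [2, 3], ['Ag', 'SiO2'], ds=[10, 20, 30, 40])
#     # -> (['Ag', 'SiO2'], [inf, 30, 40, inf])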
class DesignTracker():
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
        if epochs == -1:
            # append mode: buffers grow as designs are stored
            self.layer_ls = []
            self.thick_ls = []
            self.max_ret_ls = []
        else:
            self.layer_ls = [0] * epochs
            self.thick_ls = [0] * epochs
            self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
else:
if ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
# save buffer from all processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0,0,0):
break
read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
return read_progress
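# Illustrative use (numbers invented): track the best design found in epoch 0.
#     tracker = DesignTracker(epochs=10, output_dir='./out')
#     tracker.store(['Ge', 'SiO2'], [50, 100], ret=0.93, e=0)
#     tracker.print_progress()  # -> [['Ge 50 nm|SiO2 100 nm, Merit 0.930']]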
def print_progress(progress):
for i in range(len(progress)):
        print(progress[i])
progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]
return progress
class TMM_sim():
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
'''
This class returns the spectrum given the designed structures.
'''
self.mats = mats
# include substrate
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
'''
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
'''
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(
mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
'''
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
s: array, spectrum
'''
degree = pi/180
        if self.substrate != 'Air':
            # copy so repeated calls don't mutate the caller's list, then put
            # the substrate thickness just before the semi-infinite exit medium
            thickness = thickness[:-1] + [self.substrate_thick] + thickness[-1:]
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1e3):
            # the exit medium depends on the substrate: glass, air, or a tabulated material
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m)
for d, m in zip(thick, materials)])
                if self.substrate != 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength *
1000, T, self.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.
format(np.mean(R)*100),
'T: Average = {:.2f}%'.
format(np.mean(T)*100),
'A: Average = {:.2f}%'.
format(np.mean(A)*100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
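# Example usage (a sketch; assumes 'Ge.csv' and 'Cr.csv' live under DATABASE
# with 'wl', 'n', 'k' columns spanning the wavelength grid):
#     sim = TMM_sim(mats=['Ge'], substrate='Cr')
#     R, T, A = sim.spectrum(['Ge'], [np.inf, 50, np.inf])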
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep="\t")
width = 0.5
if ax is None:
fig, ax = plt.subplots(2,1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
sns.lineplot(x=x, y='AverageEpRet', data=df,
ax=ax[1], color=color, alpha=alpha)
plt.fill_between(df[x],
df['AverageEpRet']-width/2*df['StdEpRet'],
df['AverageEpRet']+width/2*df['StdEpRet'],
alpha=0.3, color=color)
return df
def combine_tracker(folder):
'''
Merge all buffers
'''
trackers = []
if 'design_tracker_merged.pkl' in os.listdir(folder):
tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
combined_tracker = pkl.load(open(tracker_file, 'rb'))
return combined_tracker
for file in os.listdir(folder):
if file.startswith('design_tracker_'):
tracker_file = os.path.join(folder, file)
trackers.append(pkl.load(open(tracker_file, 'rb')))
combined_tracker = DesignTracker(len(trackers[0].layer_ls))
max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)
for e in range(len(trackers[0].layer_ls)):
combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]
if combined_tracker.layer_ls[-1] != 0:
        tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
        pkl.dump(combined_tracker, open(tracker_file, 'wb'))
return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
root = '../spinningup/data/'
progress_ls = []
max_ret_ls = []
params = {'size':14}
matplotlib.rc('font', **params)
fig, ax = plt.subplots(2,1, figsize=(10,8))
for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
folder = os.path.join(root, exp, exp+'_s{}'.format(seed))
progress_file = os.path.join(folder, 'progress.txt')
df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)
tracker = combine_tracker(folder)
progress = tracker.print_progress()
print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))
progress_ls.append(progress)
max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
ax[0].legend(max_ret_ls)
ax[1].legend(exp_ls)
plt.show()
return progress_ls
def load_exp_res(folder):
subfolders = [item for item in glob.glob(folder+'/*')]
def read_hyper(file_name, rep=10):
with open(os.path.join(file_name, 'config.json')) as f:
hypers = json.load(f)
hypers_dict = {}
for k, v in hypers.items():
if k.startswith('logger'):
continue
elif isinstance(v, dict):
for kk, vv in v.items():
if isinstance(vv, list):
hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep
else:
hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep
else:
hypers_dict[k] = [v] * rep
hyper_df = pd.DataFrame(hypers_dict)
return hyper_df
    first = True  # first pandas file to load
for subfolder in tqdm(subfolders):
runs = glob.glob(subfolder+'/*')
num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\t'))
for run in runs:
tracker = combine_tracker(run)
progress = tracker.print_progress()
best_design = progress[np.argmax(tracker.max_ret_ls)]
if first:
df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
hyper_df = read_hyper(run, rep=len(df))
best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))
df = pd.concat([df, hyper_df, best_designs_df], axis=1)
first = False
else:
df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
hyper_df = read_hyper(run, rep=len(df_))
best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))
df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)
df = pd.concat([df, df_], axis=0)
return df
def finetune(simulator, m0, x0, target, display=False, bounds=None):
'''
Finetune the structure using quasi-Newton's method.
Args:
m0: materials list given by the upstream RL
x0: thicknesses given by the upstream RL
display: if true, then plot the spectrum before and after the finetuning.
Returns:
x_opt: finetuned thickness list
'''
def objective_func(x):
R, T, A = simulator.spectrum(m0, [np.inf]+list(x)+[np.inf])
return 1-cal_reward(R, T, A, target)
if bounds is None:
bounds = [(15, 200)] * len(x0)
res = minimize(objective_func, x0, bounds=bounds, options={'disp':True})
x_opt = [int(item) for item in res.x]
if display:
plt.figure()
simulator.spectrum(m0, [np.inf]+x0+[np.inf], title=True, plot=True)
plt.figure()
simulator.spectrum(m0, [np.inf]+x_opt+[np.inf], title=True, plot=True)
return x_opt, res
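# Sketch of a finetuning call (hypothetical target: a perfect absorber across
# the simulator's wavelength grid; `sim` is a TMM_sim as sketched above):
#     target = {'A': np.ones_like(sim.wavelength)}
#     x_opt, res = finetune(sim, ['Ge'], [50], target, display=False)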
|
normal
|
{
"blob_id": "f23bc0c277967d8e7a94a49c5a81ed5fb75d36cc",
"index": 9327,
"step-1": "<mask token>\n\n\nclass Memory:\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\n<mask token>\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} 
'.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\n<mask token>\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\n<mask token>\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\n<mask token>\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in 
enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\n<mask token>\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n \"\"\"\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n \"\"\"\n\n def threshold(value):\n \"\"\"\n\n \"\"\"\n names = [materials[item] for item in categories]\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(min(max(15, int(value * max_value // 2)),\n max_value))\n elif name in METALS:\n thickness.append(min(max(5, int(value * max_value // 2)),\n max_value))\n elif name in INSULATORS:\n thickness.append(min(max(1, int(value * max_value // 2)),\n max_value))\n else:\n raise ValueError('Material not known')\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\ndef print_progress(progress):\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip\n (progress[i][0], progress[i][1])]), progress[i][2]]\n return progress\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength 
= wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, 
seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder + '/*')]\n\n def read_hyper(file_name, rep=10):\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep\n else:\n hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep\n else:\n hypers_dict[k] = [v] * rep\n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df\n first = True\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder + '/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),\n sep='\\t'))\n for run in runs:\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0)\n return df\n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n \"\"\"\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n \"\"\"\n\n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])\n return 1 - cal_reward(R, T, A, target)\n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})\n x_opt = [int(item) for item in res.x]\n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,\n plot=True)\n return x_opt, res\n",
"step-4": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\ndef batch_spectrum(env, names_list, thickness_list):\n\n def spectrum(args):\n \"\"\"\n Inputs: \n 1. names: list of lists, each list correspond to the structures\n 2. thickness: list of lists\n \"\"\"\n names, thickness = args\n R, T, A = env.spectrum(names, thickness, 0, False)\n return R, T, A\n res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args) for args in\n zip(names_list, thickness_list))\n res = np.array(res)\n Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]\n return Rs, Ts, As\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n \"\"\"\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n \"\"\"\n\n def threshold(value):\n \"\"\"\n\n \"\"\"\n names = [materials[item] for item in categories]\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(min(max(15, int(value * max_value // 2)),\n max_value))\n elif name in METALS:\n thickness.append(min(max(5, int(value * max_value // 2)),\n max_value))\n elif name in INSULATORS:\n thickness.append(min(max(1, int(value * max_value // 2)),\n max_value))\n else:\n raise ValueError('Material not known')\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\ndef print_progress(progress):\n for i in range(len(progress)):\n 
print(progress[i], 0)\n progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip\n (progress[i][0], progress[i][1])]), progress[i][2]]\n return progress\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\ndef visualize_progress(file, x, ax=None, color='b', alpha=1):\n df = pd.read_csv(file, sep='\\t')\n width = 0.5\n if ax is None:\n fig, ax = plt.subplots(2, 1)\n sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha\n )\n sns.lineplot(x=x, y='AverageEpRet', data=df, ax=ax[1], color=color,\n alpha=alpha)\n plt.fill_between(df[x], df['AverageEpRet'] - width / 2 * df['StdEpRet'],\n df['AverageEpRet'] + width / 2 * df['StdEpRet'], alpha=0.3, color=color\n )\n return df\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' 
in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder + '/*')]\n\n def read_hyper(file_name, rep=10):\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep\n else:\n hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep\n else:\n hypers_dict[k] = [v] * rep\n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df\n first = True\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder + '/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),\n sep='\\t'))\n for run in runs:\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0)\n return df\n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n \"\"\"\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the 
spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n \"\"\"\n\n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])\n return 1 - cal_reward(R, T, A, target)\n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})\n x_opt = [int(item) for item in res.x]\n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,\n plot=True)\n return x_opt, res\n",
"step-5": "from mpi4py import MPI\nimport matplotlib\nfrom tmm import coh_tmm\nimport pandas as pd\nimport os\nfrom numpy import pi\nfrom scipy.interpolate import interp1d\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport pickle as pkl\nimport seaborn as sns\nfrom scipy.optimize import minimize\nimport json\nfrom tqdm import tqdm\n\nDATABASE = './data'\nINSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']\nMETALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']\n\nnum_workers = 8\n\ndef cal_reward(R, T, A, target):\n '''\n Calculate reward based on given spectrums. \n We calculate the reward using averaged (1-mse).\n\n Args:\n R, T, A: numpy array. Reflection, transmission, and \n absorption spectrums, respectively.\n target: dict. {'R':np.array, 'T':np.array, 'A':np.array}\n\n Returns:\n reward: float. Reward for the spectrum. \n '''\n\n reward = 0\n for k, v in target.items():\n\n if k == 'R':\n res = R\n elif k == 'T':\n res = T\n else:\n res = A\n \n reward += 1 - np.abs(res.squeeze() - v).mean()\n\n reward /= len(target)\n\n return reward\n\n\nclass Memory:\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\ndef batch_spectrum(env, names_list, thickness_list):\n\n def spectrum(args):\n '''\n Inputs: \n 1. names: list of lists, each list correspond to the structures\n 2. thickness: list of lists\n '''\n names, thickness = args\n R, T, A = env.spectrum(names, thickness, 0, False)\n\n return R, T, A\n\n res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)\n for args in\n zip(names_list, thickness_list))\n res = np.array(res)\n Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]\n\n return Rs, Ts, As\n\n\ndef merge_layers(categories, thicknesses):\n '''\n Merges consecutive layers with the same material types.\n '''\n\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n '''\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n '''\n\n def threshold(value):\n '''\n\n '''\n\n names = [materials[item] for item in categories]\n\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(\n min(max(15, int(value * max_value//2)), max_value))\n elif name in METALS:\n thickness.append(\n min(max(5, int(value * max_value//2)), max_value))\n elif name in INSULATORS:\n thickness.append(\n min(max(1, int(value * max_value//2)), max_value))\n else:\n raise ValueError('Material not known')\n # thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,\n # item in enumerate(values)] + [np.inf]\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\nclass DesignTracker():\n def __init__(self, epochs, **kwargs):\n \"\"\"\n 
This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n \n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n\n else:\n if ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n # save buffer from all processes\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n \n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0,0,0):\n break\n read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])\n\n return read_progress\n\ndef print_progress(progress):\n\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]\n\n return progress\n\nclass TMM_sim():\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):\n '''\n This class returns the spectrum given the designed structures.\n '''\n self.mats = mats\n # include substrate\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n '''\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n '''\n nk_dict = {}\n\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n\n\n mat_nk_fn = interp1d(\n mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n '''\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n '''\n degree = pi/180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick) # substrate thickness\n\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1e3):\n\n # we assume the last layer is glass\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]\n\n # n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]\n\n # mport pdb; pdb.set_trace()\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n\n R, T = np.array(R), 
np.array(T)\n A = 1 - R - T\n\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m)\n for d, m in zip(thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n\n plt.plot(self.wavelength * 1000, R, self.wavelength *\n 1000, T, self.wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.\n format(np.mean(R)*100),\n 'T: Average = {:.2f}%'.\n format(np.mean(T)*100),\n 'A: Average = {:.2f}%'.\n format(np.mean(A)*100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n# Plotting utils\ndef visualize_progress(file, x, ax=None, color='b', alpha=1):\n df = pd.read_csv(file, sep=\"\\t\")\n width = 0.5\n # x = 'Time'\n if ax is None:\n fig, ax = plt.subplots(2,1)\n sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)\n # ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])\n sns.lineplot(x=x, y='AverageEpRet', data=df,\n ax=ax[1], color=color, alpha=alpha)\n plt.fill_between(df[x],\n df['AverageEpRet']-width/2*df['StdEpRet'],\n df['AverageEpRet']+width/2*df['StdEpRet'],\n alpha=0.3, color=color)\n\n return df\n\ndef combine_tracker(folder):\n '''\n Merge all buffers\n '''\n trackers = []\n \n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb'))) \n\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n \n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file), 'wb'))\n\n return combined_tracker\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n \n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n\n params = {'size':14}\n matplotlib.rc('font', **params)\n\n fig, ax = plt.subplots(2,1, figsize=(10,8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp+'_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder+'/*')]\n\n def read_hyper(file_name, rep=10):\n\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = 
json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep\n else:\n hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep\n else: \n hypers_dict[k] = [v] * rep\n \n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df \n\n first=True # first pandas file to load\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder+'/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\\t'))\n for run in runs:\n\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0) \n\n return df \n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n '''\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n '''\n \n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf]+list(x)+[np.inf])\n return 1-cal_reward(R, T, A, target)\n \n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n \n res = minimize(objective_func, x0, bounds=bounds, options={'disp':True})\n x_opt = [int(item) for item in res.x]\n \n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf]+x0+[np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf]+x_opt+[np.inf], title=True, plot=True)\n \n return x_opt, res\n",
"step-ids": [
11,
16,
20,
22,
26
]
}
|
[
11,
16,
20,
22,
26
] |
def test_number():
pass
|
normal
|
{
"blob_id": "687ab41e9ce94c8d14154a941504845a8fa9f2d9",
"index": 8660,
"step-1": "<mask token>\n",
"step-2": "def test_number():\n pass\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def plot3D(xValues, labels, figure = 0):
minClass = min(labels)
numberOfClasses = int(max(labels) - minClass)
fig = plt.figure(figure)
ax = plt.axes(projection='3d')
colors = ["r", "b", "y", "c", "m"]
for i in range(numberOfClasses+1):
classLocation = np.argwhere(labels == i+minClass)
ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1], xValues[classLocation, 2]) #3D
|
normal
|
{
"blob_id": "8dfd92ab0ce0e71b41ce94bd8fcf057c8995a2a4",
"index": 1668,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot3D(xValues, labels, figure=0):\n minClass = min(labels)\n numberOfClasses = int(max(labels) - minClass)\n fig = plt.figure(figure)\n ax = plt.axes(projection='3d')\n colors = ['r', 'b', 'y', 'c', 'm']\n for i in range(numberOfClasses + 1):\n classLocation = np.argwhere(labels == i + minClass)\n ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],\n xValues[classLocation, 2])\n",
"step-3": "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n\ndef plot3D(xValues, labels, figure=0):\n minClass = min(labels)\n numberOfClasses = int(max(labels) - minClass)\n fig = plt.figure(figure)\n ax = plt.axes(projection='3d')\n colors = ['r', 'b', 'y', 'c', 'm']\n for i in range(numberOfClasses + 1):\n classLocation = np.argwhere(labels == i + minClass)\n ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],\n xValues[classLocation, 2])\n",
"step-4": "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\ndef plot3D(xValues, labels, figure = 0):\n minClass = min(labels)\n numberOfClasses = int(max(labels) - minClass)\n\n fig = plt.figure(figure)\n ax = plt.axes(projection='3d')\n colors = [\"r\", \"b\", \"y\", \"c\", \"m\"]\n for i in range(numberOfClasses+1):\n classLocation = np.argwhere(labels == i+minClass)\n ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1], xValues[classLocation, 2]) #3D\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from PyQt5 import QtCore, QtWidgets
from .main_window_base import Ui_MainWindow
from .custom_sort_filter_proxy_model import CustomSortFilterProxyModel
from .tree_model import TreeModel
model_filename = "widgets/default.txt"
class MainWindow(Ui_MainWindow, QtCore.QObject):
def __init__(self, qmain_window):
super().__init__()
self.setupUi(qmain_window)
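        # model pipeline: TreeModel -> CustomSortFilterProxyModel -> QTreeView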
self._proxy_model = CustomSortFilterProxyModel(self)
self._model = TreeModel(model_filename)
self._proxy_model.setSourceModel(self._model)
self.treeView.setModel(self._proxy_model)
self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# Attach slot/signals
self.filterPatternEdit.editingFinished.connect(
lambda: self._proxy_model.update_filter_pattern(self.filterPatternEdit.text()))
self.filterSyntaxComboBox.currentTextChanged.connect(self._proxy_model.update_filter_syntax)
self.filterColumnComboBox.currentTextChanged.connect(self._proxy_model.update_filter_column)
self.caseSensitiveFilterCB.stateChanged.connect(
lambda state: self._proxy_model.update_case_sensitive_filter(state))
self.caseSensitiveSortingCB.stateChanged.connect(
lambda state: self._proxy_model.update_case_sensitive_sort(state))
|
normal
|
{
"blob_id": "7a918518d8c9ff1184a634d1a5c799e735dfbc8a",
"index": 1707,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.\n ResizeToContents)\n self.filterPatternEdit.editingFinished.connect(lambda : self.\n _proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(lambda state: self.\n _proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(lambda state: self\n ._proxy_model.update_case_sensitive_sort(state))\n",
"step-3": "<mask token>\nmodel_filename = 'widgets/default.txt'\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.\n ResizeToContents)\n self.filterPatternEdit.editingFinished.connect(lambda : self.\n _proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(lambda state: self.\n _proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(lambda state: self\n ._proxy_model.update_case_sensitive_sort(state))\n",
"step-4": "from PyQt5 import QtCore, QtWidgets\nfrom .main_window_base import Ui_MainWindow\nfrom .custom_sort_filter_proxy_model import CustomSortFilterProxyModel\nfrom .tree_model import TreeModel\nmodel_filename = 'widgets/default.txt'\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.\n ResizeToContents)\n self.filterPatternEdit.editingFinished.connect(lambda : self.\n _proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(lambda state: self.\n _proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(lambda state: self\n ._proxy_model.update_case_sensitive_sort(state))\n",
"step-5": "\nfrom PyQt5 import QtCore, QtWidgets\n\nfrom .main_window_base import Ui_MainWindow\nfrom .custom_sort_filter_proxy_model import CustomSortFilterProxyModel\nfrom .tree_model import TreeModel\n\nmodel_filename = \"widgets/default.txt\"\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n\n # Attach slot/signals\n self.filterPatternEdit.editingFinished.connect(\n lambda: self._proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self._proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self._proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(\n lambda state: self._proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(\n lambda state: self._proxy_model.update_case_sensitive_sort(state))\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
apiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',
include('meeting.urls')), url('^docs/', include(
'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(
'^fcm/devices/', include('device.urls')), url('^statistics/', include(
'stats.urls')), url('^admin/', include('admin.urls'))]
urlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.
site.urls)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.conf.urls import url, include
from django.contrib import admin
apiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',
include('meeting.urls')), url('^docs/', include(
'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(
'^fcm/devices/', include('device.urls')), url('^statistics/', include(
'stats.urls')), url('^admin/', include('admin.urls'))]
urlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.
site.urls)]
<|reserved_special_token_1|>
"""Defines all Rady URL."""
from django.conf.urls import url, include
from django.contrib import admin
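# all versioned endpoints hang off a single /api/v1/ prefix (see urlpatterns below)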
apiv1_urls = [
url(r"^users/", include("user.urls")),
url(r"^meetings/", include("meeting.urls")),
url(r"^docs/", include("rest_framework_docs.urls")),
url(r"^auth/", include("auth.urls")),
url(r"^fcm/devices/", include("device.urls")),
url(r"^statistics/", include("stats.urls")),
url(r"^admin/", include("admin.urls")),
]
urlpatterns = [
url(r"^api/v1/", include(apiv1_urls)),
url(r"^admin/", admin.site.urls),
]
|
flexible
|
{
"blob_id": "aa00e4569aeae58e3f0ea1a8326e35c0776f7727",
"index": 4849,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',\n include('meeting.urls')), url('^docs/', include(\n 'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(\n '^fcm/devices/', include('device.urls')), url('^statistics/', include(\n 'stats.urls')), url('^admin/', include('admin.urls'))]\nurlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.\n site.urls)]\n",
"step-3": "<mask token>\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\napiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',\n include('meeting.urls')), url('^docs/', include(\n 'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(\n '^fcm/devices/', include('device.urls')), url('^statistics/', include(\n 'stats.urls')), url('^admin/', include('admin.urls'))]\nurlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.\n site.urls)]\n",
"step-4": "\"\"\"Defines all Rady URL.\"\"\"\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\n\napiv1_urls = [\n url(r\"^users/\", include(\"user.urls\")),\n url(r\"^meetings/\", include(\"meeting.urls\")),\n url(r\"^docs/\", include(\"rest_framework_docs.urls\")),\n url(r\"^auth/\", include(\"auth.urls\")),\n url(r\"^fcm/devices/\", include(\"device.urls\")),\n url(r\"^statistics/\", include(\"stats.urls\")),\n url(r\"^admin/\", include(\"admin.urls\")),\n]\n\nurlpatterns = [\n url(r\"^api/v1/\", include(apiv1_urls)),\n url(r\"^admin/\", admin.site.urls),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SpriteObject(pygame.sprite.Sprite):
<|reserved_special_token_0|>
def update(self):
self.rotate()
def rotate(self):
self.angle += 0.3
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center=self.rect.center)
self.mask = pygame.mask.from_surface(self.image)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpriteObject(pygame.sprite.Sprite):
def __init__(self, x, y, w, h, color):
pygame.sprite.Sprite.__init__(self)
self.angle = 0
self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)
self.original_image.fill(color)
self.image = self.original_image
self.rect = self.image.get_rect(center=(x, y))
self.mask = pygame.mask.from_surface(self.image)
def update(self):
self.rotate()
def rotate(self):
self.angle += 0.3
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center=self.rect.center)
self.mask = pygame.mask.from_surface(self.image)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpriteObject(pygame.sprite.Sprite):
def __init__(self, x, y, w, h, color):
pygame.sprite.Sprite.__init__(self)
self.angle = 0
self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)
self.original_image.fill(color)
self.image = self.original_image
self.rect = self.image.get_rect(center=(x, y))
self.mask = pygame.mask.from_surface(self.image)
def update(self):
self.rotate()
def rotate(self):
self.angle += 0.3
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center=self.rect.center)
self.mask = pygame.mask.from_surface(self.image)
pygame.init()
<|reserved_special_token_0|>
while run:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
moving_object.rect.center = pygame.mouse.get_pos()
all_sprites.update()
collide = pygame.sprite.spritecollide(moving_object, static_sprites,
False, pygame.sprite.collide_mask)
window.fill((255, 0, 0) if collide else (255, 255, 255))
all_sprites.draw(window)
pygame.display.update()
pygame.quit()
exit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpriteObject(pygame.sprite.Sprite):
def __init__(self, x, y, w, h, color):
pygame.sprite.Sprite.__init__(self)
self.angle = 0
self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)
self.original_image.fill(color)
self.image = self.original_image
self.rect = self.image.get_rect(center=(x, y))
self.mask = pygame.mask.from_surface(self.image)
def update(self):
self.rotate()
def rotate(self):
self.angle += 0.3
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center=self.rect.center)
self.mask = pygame.mask.from_surface(self.image)
pygame.init()
clock = pygame.time.Clock()
window = pygame.display.set_mode((400, 400))
size = window.get_size()
moving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))
static_objects = [SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128,
128, 128)), SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128,
128, 128)), SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (
128, 128, 128))]
all_sprites = pygame.sprite.Group([moving_object] + static_objects)
static_sprites = pygame.sprite.Group(static_objects)
run = True
while run:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
moving_object.rect.center = pygame.mouse.get_pos()
all_sprites.update()
collide = pygame.sprite.spritecollide(moving_object, static_sprites,
False, pygame.sprite.collide_mask)
window.fill((255, 0, 0) if collide else (255, 255, 255))
all_sprites.draw(window)
pygame.display.update()
pygame.quit()
exit()
<|reserved_special_token_1|>
import pygame
class SpriteObject(pygame.sprite.Sprite):
def __init__(self, x, y, w, h, color):
pygame.sprite.Sprite.__init__(self)
self.angle = 0
self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)
self.original_image.fill(color)
self.image = self.original_image
self.rect = self.image.get_rect(center = (x, y))
self.mask = pygame.mask.from_surface(self.image )
def update(self):
self.rotate()
def rotate(self):
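        # always rotate the pristine original_image so repeated rotations do not degrade the surface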
self.angle += 0.3
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center = self.rect.center)
self.mask = pygame.mask.from_surface(self.image )
pygame.init()
clock = pygame.time.Clock()
window = pygame.display.set_mode((400, 400))
size = window.get_size()
moving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))
static_objects = [
SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128, 128, 128)),
SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128)),
SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128))
]
all_sprites = pygame.sprite.Group([moving_object] + static_objects)
static_sprites = pygame.sprite.Group(static_objects)
run = True
while run:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
moving_object.rect.center = pygame.mouse.get_pos()
all_sprites.update()
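    # pixel-perfect test: collide_mask compares the sprites' precomputed alpha masks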
collide = pygame.sprite.spritecollide(moving_object, static_sprites, False, pygame.sprite.collide_mask)
window.fill((255, 0, 0) if collide else (255, 255, 255))
all_sprites.draw(window)
pygame.display.update()
pygame.quit()
exit()
|
flexible
|
{
"blob_id": "b90c6a3f8fe084bc2acc0b733750124a1387527c",
"index": 1712,
"step-1": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n <mask token>\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n\n def __init__(self, x, y, w, h, color):\n pygame.sprite.Sprite.__init__(self)\n self.angle = 0\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\n self.original_image.fill(color)\n self.image = self.original_image\n self.rect = self.image.get_rect(center=(x, y))\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n\n def __init__(self, x, y, w, h, color):\n pygame.sprite.Sprite.__init__(self)\n self.angle = 0\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\n self.original_image.fill(color)\n self.image = self.original_image\n self.rect = self.image.get_rect(center=(x, y))\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\npygame.init()\n<mask token>\nwhile run:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n moving_object.rect.center = pygame.mouse.get_pos()\n all_sprites.update()\n collide = pygame.sprite.spritecollide(moving_object, static_sprites, \n False, pygame.sprite.collide_mask)\n window.fill((255, 0, 0) if collide else (255, 255, 255))\n all_sprites.draw(window)\n pygame.display.update()\npygame.quit()\nexit()\n",
"step-4": "<mask token>\n\n\nclass SpriteObject(pygame.sprite.Sprite):\n\n def __init__(self, x, y, w, h, color):\n pygame.sprite.Sprite.__init__(self)\n self.angle = 0\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\n self.original_image.fill(color)\n self.image = self.original_image\n self.rect = self.image.get_rect(center=(x, y))\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n self.rotate()\n\n def rotate(self):\n self.angle += 0.3\n self.image = pygame.transform.rotate(self.original_image, self.angle)\n self.rect = self.image.get_rect(center=self.rect.center)\n self.mask = pygame.mask.from_surface(self.image)\n\n\npygame.init()\nclock = pygame.time.Clock()\nwindow = pygame.display.set_mode((400, 400))\nsize = window.get_size()\nmoving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))\nstatic_objects = [SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128, \n 128, 128)), SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128,\n 128, 128)), SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (\n 128, 128, 128))]\nall_sprites = pygame.sprite.Group([moving_object] + static_objects)\nstatic_sprites = pygame.sprite.Group(static_objects)\nrun = True\nwhile run:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n moving_object.rect.center = pygame.mouse.get_pos()\n all_sprites.update()\n collide = pygame.sprite.spritecollide(moving_object, static_sprites, \n False, pygame.sprite.collide_mask)\n window.fill((255, 0, 0) if collide else (255, 255, 255))\n all_sprites.draw(window)\n pygame.display.update()\npygame.quit()\nexit()\n",
"step-5": "import pygame\r\n\r\nclass SpriteObject(pygame.sprite.Sprite):\r\n def __init__(self, x, y, w, h, color):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.angle = 0\r\n self.original_image = pygame.Surface([w, h], pygame.SRCALPHA)\r\n self.original_image.fill(color)\r\n self.image = self.original_image\r\n self.rect = self.image.get_rect(center = (x, y))\r\n self.mask = pygame.mask.from_surface(self.image )\r\n def update(self):\r\n self.rotate()\r\n def rotate(self):\r\n self.angle += 0.3\r\n self.image = pygame.transform.rotate(self.original_image, self.angle)\r\n self.rect = self.image.get_rect(center = self.rect.center)\r\n self.mask = pygame.mask.from_surface(self.image )\r\n\r\npygame.init()\r\nclock = pygame.time.Clock()\r\nwindow = pygame.display.set_mode((400, 400))\r\nsize = window.get_size()\r\n\r\nmoving_object = SpriteObject(0, 0, 50, 50, (128, 0, 255))\r\nstatic_objects = [\r\n SpriteObject(size[0] // 2, size[1] // 3, 100, 50, (128, 128, 128)),\r\n SpriteObject(size[0] // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128)),\r\n SpriteObject(size[0] * 3 // 4, size[1] * 2 // 3, 100, 50, (128, 128, 128))\r\n]\r\nall_sprites = pygame.sprite.Group([moving_object] + static_objects)\r\nstatic_sprites = pygame.sprite.Group(static_objects)\r\n\r\nrun = True\r\nwhile run:\r\n clock.tick(60)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n moving_object.rect.center = pygame.mouse.get_pos()\r\n all_sprites.update() \r\n collide = pygame.sprite.spritecollide(moving_object, static_sprites, False, pygame.sprite.collide_mask)\r\n \r\n window.fill((255, 0, 0) if collide else (255, 255, 255))\r\n all_sprites.draw(window)\r\n pygame.display.update()\r\n\r\npygame.quit()\r\nexit()",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
from rest_framework.views import APIView
from .serializers import UserSerializer
from rest_framework import permissions
from .models import users
from rest_framework.response import Response
from django.http import JsonResponse
from rest_framework import viewsets
from profiles.models import profile
from profiles.serializers import ProfileSerializer
from follows.models import Follow
class GetDefaultUsers(APIView):
permission_classes =[
permissions.IsAuthenticated
]
def post(self,request, *args, **kwargs):
user = self.request.user
userers = users.objects.all()[:5]
users_to_pass = []
for user_now in userers:
user_id = user.id
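            # suggest only accounts the requester does not already follow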
check_if_already_followed = Follow.objects.filter(user_id = user_now.id).filter(follower_id = user.id)
if len(check_if_already_followed) == 0:
users_to_pass.append(user_now)
serilizer_class_many = UserSerializer(users_to_pass, many=True)
serilizer_class = UserSerializer(user)
return Response({
'users':serilizer_class_many.data,
"user":serilizer_class.data
})
class GetSpecificUser(APIView):
permission_classes =[
permissions.IsAuthenticated
]
def post(self, request,id=None, *args, **kwargs):
try:
queryset = users.objects.get(id=id)
        except users.DoesNotExist:
            return JsonResponse({'error':"user does not exist"}, status = 400)
try:
profile_queryset = profile.objects.get(user = queryset)
except profile.DoesNotExist:
return JsonResponse({'error':"user does not have a profile"}, status = 400)
serializer_class = UserSerializer(queryset)
serializer_class_profile = ProfileSerializer(profile_queryset)
return Response(
{'user':serializer_class.data,
'profile':serializer_class_profile.data
},
status=200)
|
normal
|
{
"blob_id": "c5a7f269f579bd1960afa4f700b5c3436ac6d91a",
"index": 2733,
"step-1": "<mask token>\n\n\nclass GetDefaultUsers(APIView):\n <mask token>\n <mask token>\n\n\nclass GetSpecificUser(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, id=None, *args, **kwargs):\n try:\n queryset = users.objects.get(id=id)\n except user.DoesNotExist:\n return JsonResponse({'error': 'user does not exits'}, status=400)\n try:\n profile_queryset = profile.objects.get(user=queryset)\n except profile.DoesNotExist:\n return JsonResponse({'error': 'user does not have a profile'},\n status=400)\n serializer_class = UserSerializer(queryset)\n serializer_class_profile = ProfileSerializer(profile_queryset)\n return Response({'user': serializer_class.data, 'profile':\n serializer_class_profile.data}, status=200)\n",
"step-2": "<mask token>\n\n\nclass GetDefaultUsers(APIView):\n <mask token>\n\n def post(self, request, *args, **kwargs):\n user = self.request.user\n userers = users.objects.all()[:5]\n users_to_pass = []\n for user_now in userers:\n user_id = user.id\n check_if_already_followed = Follow.objects.filter(user_id=\n user_now.id).filter(follower_id=user.id)\n if len(check_if_already_followed) == 0:\n users_to_pass.append(user_now)\n serilizer_class_many = UserSerializer(users_to_pass, many=True)\n serilizer_class = UserSerializer(user)\n return Response({'users': serilizer_class_many.data, 'user':\n serilizer_class.data})\n\n\nclass GetSpecificUser(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, id=None, *args, **kwargs):\n try:\n queryset = users.objects.get(id=id)\n except user.DoesNotExist:\n return JsonResponse({'error': 'user does not exits'}, status=400)\n try:\n profile_queryset = profile.objects.get(user=queryset)\n except profile.DoesNotExist:\n return JsonResponse({'error': 'user does not have a profile'},\n status=400)\n serializer_class = UserSerializer(queryset)\n serializer_class_profile = ProfileSerializer(profile_queryset)\n return Response({'user': serializer_class.data, 'profile':\n serializer_class_profile.data}, status=200)\n",
"step-3": "<mask token>\n\n\nclass GetDefaultUsers(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, *args, **kwargs):\n user = self.request.user\n userers = users.objects.all()[:5]\n users_to_pass = []\n for user_now in userers:\n user_id = user.id\n check_if_already_followed = Follow.objects.filter(user_id=\n user_now.id).filter(follower_id=user.id)\n if len(check_if_already_followed) == 0:\n users_to_pass.append(user_now)\n serilizer_class_many = UserSerializer(users_to_pass, many=True)\n serilizer_class = UserSerializer(user)\n return Response({'users': serilizer_class_many.data, 'user':\n serilizer_class.data})\n\n\nclass GetSpecificUser(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, id=None, *args, **kwargs):\n try:\n queryset = users.objects.get(id=id)\n except user.DoesNotExist:\n return JsonResponse({'error': 'user does not exits'}, status=400)\n try:\n profile_queryset = profile.objects.get(user=queryset)\n except profile.DoesNotExist:\n return JsonResponse({'error': 'user does not have a profile'},\n status=400)\n serializer_class = UserSerializer(queryset)\n serializer_class_profile = ProfileSerializer(profile_queryset)\n return Response({'user': serializer_class.data, 'profile':\n serializer_class_profile.data}, status=200)\n",
"step-4": "from rest_framework.views import APIView\nfrom .serializers import UserSerializer\nfrom rest_framework import permissions\nfrom .models import users\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\nfrom rest_framework import viewsets\nfrom profiles.models import profile\nfrom profiles.serializers import ProfileSerializer\nfrom follows.models import Follow\n\n\nclass GetDefaultUsers(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, *args, **kwargs):\n user = self.request.user\n userers = users.objects.all()[:5]\n users_to_pass = []\n for user_now in userers:\n user_id = user.id\n check_if_already_followed = Follow.objects.filter(user_id=\n user_now.id).filter(follower_id=user.id)\n if len(check_if_already_followed) == 0:\n users_to_pass.append(user_now)\n serilizer_class_many = UserSerializer(users_to_pass, many=True)\n serilizer_class = UserSerializer(user)\n return Response({'users': serilizer_class_many.data, 'user':\n serilizer_class.data})\n\n\nclass GetSpecificUser(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request, id=None, *args, **kwargs):\n try:\n queryset = users.objects.get(id=id)\n except user.DoesNotExist:\n return JsonResponse({'error': 'user does not exits'}, status=400)\n try:\n profile_queryset = profile.objects.get(user=queryset)\n except profile.DoesNotExist:\n return JsonResponse({'error': 'user does not have a profile'},\n status=400)\n serializer_class = UserSerializer(queryset)\n serializer_class_profile = ProfileSerializer(profile_queryset)\n return Response({'user': serializer_class.data, 'profile':\n serializer_class_profile.data}, status=200)\n",
"step-5": "from rest_framework.views import APIView\nfrom .serializers import UserSerializer\nfrom rest_framework import permissions\nfrom .models import users\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\nfrom rest_framework import viewsets\nfrom profiles.models import profile\nfrom profiles.serializers import ProfileSerializer\nfrom follows.models import Follow\n\n\nclass GetDefaultUsers(APIView):\n permission_classes =[\n permissions.IsAuthenticated\n ]\n \n def post(self,request, *args, **kwargs):\n user = self.request.user\n userers = users.objects.all()[:5]\n users_to_pass = []\n for user_now in userers:\n user_id = user.id\n check_if_already_followed = Follow.objects.filter(user_id = user_now.id).filter(follower_id = user.id)\n if len(check_if_already_followed) == 0:\n users_to_pass.append(user_now)\n \n serilizer_class_many = UserSerializer(users_to_pass, many=True)\n serilizer_class = UserSerializer(user)\n return Response({\n 'users':serilizer_class_many.data,\n \"user\":serilizer_class.data\n })\n \nclass GetSpecificUser(APIView):\n permission_classes =[\n permissions.IsAuthenticated\n ]\n def post(self, request,id=None, *args, **kwargs):\n try:\n queryset = users.objects.get(id=id)\n except user.DoesNotExist:\n return JsonResponse({'error':\"user does not exits\"}, status = 400)\n try:\n profile_queryset = profile.objects.get(user = queryset)\n except profile.DoesNotExist:\n return JsonResponse({'error':\"user does not have a profile\"}, status = 400)\n \n serializer_class = UserSerializer(queryset)\n serializer_class_profile = ProfileSerializer(profile_queryset)\n \n return Response(\n {'user':serializer_class.data,\n 'profile':serializer_class_profile.data \n },\n status=200)\n \n ",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""
This type stub file was generated by pyright.
"""
import editobj3.introsp as introsp
import editobj3.editor as editor
from owlready2 import *
from editobj3.observe import *
from typing import Any, Optional
__all__ = ["EditedInstances", "OntologyInstanceEditor"]
class EditedInstances(object):
def __init__(self, ontology, Class):
self.ontology = ...
self.namespace = ...
self.Class = ...
self.name = ...
def get_instances(self):
...
instances = ...
def remove_instance(self, instance):
...
def __str__(self):
...
def details(self):
...
def addable_values(self):
...
descr = introsp.description(EditedInstances)
class TabPaneRepartitor(editor.PaneRepartitor):
def __init__(self, instance_editor, tab_edited_class):
self.instance_editor = ...
self.tab_edited_class = ...
def is_displayed_in_other_tab(self, attribute, o_Class):
...
def _compute(self, o, attribute, field_class: Optional[Any] = ...):
...
def is_displayed_in_hierarchy_pane(self, attribute, o, field_class: Optional[Any] = ...):
...
def is_displayed_in_attribute_pane(self, attribute, o, field_class: Optional[Any] = ...):
...
class OntologyInstanceEditor(editor.EditorTabbedDialog):
_Qt_MODULE = ...
_HTML_MODULE = ...
def __init__(self, gui: Optional[Any] = ..., master: Optional[Any] = ..., direction=..., on_validate: Optional[Any] = ..., edit_child_in_self=..., undo_stack: Optional[Any] = ..., on_close: Optional[Any] = ..., menubar: bool = ...):
self.ontology = ...
self.edited_classes = ...
self.last_undoables = ...
self.edited_instancess = ...
def on_dialog_closed(self, *args):
...
def set_ontology(self, ontology, edited_classes: Optional[Any] = ...):
self.ontology = ...
self.edited_classes = ...
self.last_undoables = ...
self.edited_instancess = ...
def add_tab_for_class(self, Class):
...
def on_save(self, *args):
self.last_undoables = ...
def on_save_as(self, *args):
...
|
normal
|
{
"blob_id": "440c116327ee587b5a305953772523011ece5dda",
"index": 9641,
"step-1": "<mask token>\n\n\nclass TabPaneRepartitor(editor.PaneRepartitor):\n\n def __init__(self, instance_editor, tab_edited_class):\n self.instance_editor = ...\n self.tab_edited_class = ...\n\n def is_displayed_in_other_tab(self, attribute, o_Class):\n ...\n <mask token>\n\n def is_displayed_in_hierarchy_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n def is_displayed_in_attribute_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n\nclass OntologyInstanceEditor(editor.EditorTabbedDialog):\n _Qt_MODULE = ...\n _HTML_MODULE = ...\n\n def __init__(self, gui: Optional[Any]=..., master: Optional[Any]=...,\n direction=..., on_validate: Optional[Any]=..., edit_child_in_self=\n ..., undo_stack: Optional[Any]=..., on_close: Optional[Any]=...,\n menubar: bool=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def on_dialog_closed(self, *args):\n ...\n\n def set_ontology(self, ontology, edited_classes: Optional[Any]=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def add_tab_for_class(self, Class):\n ...\n\n def on_save(self, *args):\n self.last_undoables = ...\n\n def on_save_as(self, *args):\n ...\n",
"step-2": "<mask token>\n\n\nclass EditedInstances(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def details(self):\n ...\n <mask token>\n\n\n<mask token>\n\n\nclass TabPaneRepartitor(editor.PaneRepartitor):\n\n def __init__(self, instance_editor, tab_edited_class):\n self.instance_editor = ...\n self.tab_edited_class = ...\n\n def is_displayed_in_other_tab(self, attribute, o_Class):\n ...\n\n def _compute(self, o, attribute, field_class: Optional[Any]=...):\n ...\n\n def is_displayed_in_hierarchy_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n def is_displayed_in_attribute_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n\nclass OntologyInstanceEditor(editor.EditorTabbedDialog):\n _Qt_MODULE = ...\n _HTML_MODULE = ...\n\n def __init__(self, gui: Optional[Any]=..., master: Optional[Any]=...,\n direction=..., on_validate: Optional[Any]=..., edit_child_in_self=\n ..., undo_stack: Optional[Any]=..., on_close: Optional[Any]=...,\n menubar: bool=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def on_dialog_closed(self, *args):\n ...\n\n def set_ontology(self, ontology, edited_classes: Optional[Any]=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def add_tab_for_class(self, Class):\n ...\n\n def on_save(self, *args):\n self.last_undoables = ...\n\n def on_save_as(self, *args):\n ...\n",
"step-3": "<mask token>\n\n\nclass EditedInstances(object):\n <mask token>\n\n def get_instances(self):\n ...\n <mask token>\n\n def remove_instance(self, instance):\n ...\n\n def __str__(self):\n ...\n\n def details(self):\n ...\n\n def addable_values(self):\n ...\n\n\n<mask token>\n\n\nclass TabPaneRepartitor(editor.PaneRepartitor):\n\n def __init__(self, instance_editor, tab_edited_class):\n self.instance_editor = ...\n self.tab_edited_class = ...\n\n def is_displayed_in_other_tab(self, attribute, o_Class):\n ...\n\n def _compute(self, o, attribute, field_class: Optional[Any]=...):\n ...\n\n def is_displayed_in_hierarchy_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n def is_displayed_in_attribute_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n\nclass OntologyInstanceEditor(editor.EditorTabbedDialog):\n _Qt_MODULE = ...\n _HTML_MODULE = ...\n\n def __init__(self, gui: Optional[Any]=..., master: Optional[Any]=...,\n direction=..., on_validate: Optional[Any]=..., edit_child_in_self=\n ..., undo_stack: Optional[Any]=..., on_close: Optional[Any]=...,\n menubar: bool=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def on_dialog_closed(self, *args):\n ...\n\n def set_ontology(self, ontology, edited_classes: Optional[Any]=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def add_tab_for_class(self, Class):\n ...\n\n def on_save(self, *args):\n self.last_undoables = ...\n\n def on_save_as(self, *args):\n ...\n",
"step-4": "<mask token>\n__all__ = ['EditedInstances', 'OntologyInstanceEditor']\n\n\nclass EditedInstances(object):\n\n def __init__(self, ontology, Class):\n self.ontology = ...\n self.namespace = ...\n self.Class = ...\n self.name = ...\n\n def get_instances(self):\n ...\n instances = ...\n\n def remove_instance(self, instance):\n ...\n\n def __str__(self):\n ...\n\n def details(self):\n ...\n\n def addable_values(self):\n ...\n\n\ndescr = introsp.description(EditedInstances)\n\n\nclass TabPaneRepartitor(editor.PaneRepartitor):\n\n def __init__(self, instance_editor, tab_edited_class):\n self.instance_editor = ...\n self.tab_edited_class = ...\n\n def is_displayed_in_other_tab(self, attribute, o_Class):\n ...\n\n def _compute(self, o, attribute, field_class: Optional[Any]=...):\n ...\n\n def is_displayed_in_hierarchy_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n def is_displayed_in_attribute_pane(self, attribute, o, field_class:\n Optional[Any]=...):\n ...\n\n\nclass OntologyInstanceEditor(editor.EditorTabbedDialog):\n _Qt_MODULE = ...\n _HTML_MODULE = ...\n\n def __init__(self, gui: Optional[Any]=..., master: Optional[Any]=...,\n direction=..., on_validate: Optional[Any]=..., edit_child_in_self=\n ..., undo_stack: Optional[Any]=..., on_close: Optional[Any]=...,\n menubar: bool=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def on_dialog_closed(self, *args):\n ...\n\n def set_ontology(self, ontology, edited_classes: Optional[Any]=...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n\n def add_tab_for_class(self, Class):\n ...\n\n def on_save(self, *args):\n self.last_undoables = ...\n\n def on_save_as(self, *args):\n ...\n",
"step-5": "\"\"\"\nThis type stub file was generated by pyright.\n\"\"\"\n\nimport editobj3.introsp as introsp\nimport editobj3.editor as editor\nfrom owlready2 import *\nfrom editobj3.observe import *\nfrom typing import Any, Optional\n\n__all__ = [\"EditedInstances\", \"OntologyInstanceEditor\"]\nclass EditedInstances(object):\n def __init__(self, ontology, Class):\n self.ontology = ...\n self.namespace = ...\n self.Class = ...\n self.name = ...\n \n def get_instances(self):\n ...\n \n instances = ...\n def remove_instance(self, instance):\n ...\n \n def __str__(self):\n ...\n \n def details(self):\n ...\n \n def addable_values(self):\n ...\n \n\n\ndescr = introsp.description(EditedInstances)\nclass TabPaneRepartitor(editor.PaneRepartitor):\n def __init__(self, instance_editor, tab_edited_class):\n self.instance_editor = ...\n self.tab_edited_class = ...\n \n def is_displayed_in_other_tab(self, attribute, o_Class):\n ...\n \n def _compute(self, o, attribute, field_class: Optional[Any] = ...):\n ...\n \n def is_displayed_in_hierarchy_pane(self, attribute, o, field_class: Optional[Any] = ...):\n ...\n \n def is_displayed_in_attribute_pane(self, attribute, o, field_class: Optional[Any] = ...):\n ...\n \n\n\nclass OntologyInstanceEditor(editor.EditorTabbedDialog):\n _Qt_MODULE = ...\n _HTML_MODULE = ...\n def __init__(self, gui: Optional[Any] = ..., master: Optional[Any] = ..., direction=..., on_validate: Optional[Any] = ..., edit_child_in_self=..., undo_stack: Optional[Any] = ..., on_close: Optional[Any] = ..., menubar: bool = ...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n \n def on_dialog_closed(self, *args):\n ...\n \n def set_ontology(self, ontology, edited_classes: Optional[Any] = ...):\n self.ontology = ...\n self.edited_classes = ...\n self.last_undoables = ...\n self.edited_instancess = ...\n \n def add_tab_for_class(self, Class):\n ...\n \n def on_save(self, *args):\n self.last_undoables = ...\n \n def on_save_as(self, *args):\n ...\n \n\n\n",
"step-ids": [
13,
16,
20,
23,
25
]
}
|
[
13,
16,
20,
23,
25
] |
import unittest2 as unittest
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
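        # starting each patch activates it and returns the mock, kept for assertions in tests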
self.mock_objs = [p.start() for p in self.patches]
# if you have a tearDown() in your test class,
# be sure to call this using super.tearDown()
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
# hide unittest dependencies here
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
|
normal
|
{
"blob_id": "e9c88e18472281438783d29648c673aa08366abb",
"index": 1686,
"step-1": "<mask token>\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n\n def decorate_function(test):\n\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n\n def decorate_function(test):\n\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\n\nskip = unittest.skip\n",
"step-4": "import unittest2 as unittest\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n\n def decorate_function(test):\n\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\n\nskip = unittest.skip\n",
"step-5": "import unittest2 as unittest\n\n\nclass GpTestCase(unittest.TestCase):\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n # if you have a tearDown() in your test class,\n # be sure to call this using super.tearDown()\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n def decorate_function(test):\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n# hide unittest dependencies here\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\nskip = unittest.skip\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
        print('the two word senses with the highest score (semantically close)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
            return get_similar_words([words[0]])
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
        print('the two word senses with the highest score (semantically close)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
            return get_similar_words([words[0]])
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = '{}\\{}.json'.format(scorePath, model)
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile, 'w') as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic, dump_f)
with open(scoreFile, 'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print(user)
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print(user)
print(timestamp)
print(usersData[user]['data'][timestamp])
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][
timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k
) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[0], k,
firstWordMaxWordSimilarity))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k
) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[1], k,
secondWordMaxWordSimilarity))
maxScore = (firstWordMaxWordSimilarity +
secondWordMaxWordSimilarity) / 2
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print('{} vs {} = {}'.format(s, k,
maxScore))
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
try:
currentImgScoreDic = {t: round(imageScoreDic[t] /
countImages * 100, 3) for t in myTypes}
except:
currentImgScoreDic = {}
                    print('no images so far')
try:
currentVideoScoreDic = {t: round(videoScoreDic[t] /
countVideos * 100, 3) for t in myTypes}
except:
currentVideoScoreDic = {}
                    print('no videos so far')
if user in users:
load_dict[str(countPost)][users[user]]['follower'
] = usersData[user]['followers']
load_dict[str(countPost)][users[user]]['like'] = round(
countLike / countPost, 3)
load_dict[str(countPost)][users[user]]['comment'] = round(
countComment / countPost, 3)
load_dict[str(countPost)][users[user]]['image']['amount'
] = countImages
load_dict[str(countPost)][users[user]]['image']['score'
] = currentImgScoreDic
load_dict[str(countPost)][users[user]]['video']['amount'
] = countVideos
load_dict[str(countPost)][users[user]]['video']['score'
] = currentVideoScoreDic
load_dict[str(countPost)][users[user]]['ERate'] = round(
(countLike / countPost + countComment / countPost) /
usersData[user]['followers'], 5)
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round(countLike / countPost, 3)
new_dic['comment'] = round(countComment / countPost, 3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round((countLike / countPost +
countComment / countPost) / usersData[user][
'followers'], 5)
load_dict[str(countPost)].append(new_dic)
if countPost == 300:
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = {load_dict[str(countPost - 50)][i]['name']: i for i in
range(0, len(load_dict[str(countPost - 50)]))}
finalDic = load_dict[str(countPost - 50)][users[user]]
while countPost <= 300:
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
if user in users:
load_dict[str(countPost)][users[user]] = finalDic
else:
load_dict[str(countPost)].append(finalDic)
countPost += 50
with open(scoreFile, 'w') as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore('wordNet')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car',
'motorcycle']
scorePath = '..\\data\\score'
usersDataFile = '..\\data\\usersData.json'
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
        print('the two word senses with the highest score (semantically close)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
            return get_similar_words([words[0]])
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = '{}\\{}.json'.format(scorePath, model)
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile, 'w') as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic, dump_f)
with open(scoreFile, 'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print(user)
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print(user)
print(timestamp)
print(usersData[user]['data'][timestamp])
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][
timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k
) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[0], k,
firstWordMaxWordSimilarity))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k
) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[1], k,
secondWordMaxWordSimilarity))
maxScore = (firstWordMaxWordSimilarity +
secondWordMaxWordSimilarity) / 2
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print('{} vs {} = {}'.format(s, k,
maxScore))
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
try:
currentImgScoreDic = {t: round(imageScoreDic[t] /
countImages * 100, 3) for t in myTypes}
except:
currentImgScoreDic = {}
                    print('No images yet')
try:
currentVideoScoreDic = {t: round(videoScoreDic[t] /
countVideos * 100, 3) for t in myTypes}
except:
currentVideoScoreDic = {}
                    print('No videos yet')
if user in users:
load_dict[str(countPost)][users[user]]['follower'
] = usersData[user]['followers']
load_dict[str(countPost)][users[user]]['like'] = round(
countLike / countPost, 3)
load_dict[str(countPost)][users[user]]['comment'] = round(
countComment / countPost, 3)
load_dict[str(countPost)][users[user]]['image']['amount'
] = countImages
load_dict[str(countPost)][users[user]]['image']['score'
] = currentImgScoreDic
load_dict[str(countPost)][users[user]]['video']['amount'
] = countVideos
load_dict[str(countPost)][users[user]]['video']['score'
] = currentVideoScoreDic
load_dict[str(countPost)][users[user]]['ERate'] = round(
(countLike / countPost + countComment / countPost) /
usersData[user]['followers'], 5)
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round(countLike / countPost, 3)
new_dic['comment'] = round(countComment / countPost, 3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round((countLike / countPost +
countComment / countPost) / usersData[user][
'followers'], 5)
load_dict[str(countPost)].append(new_dic)
if countPost == 300:
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = {load_dict[str(countPost - 50)][i]['name']: i for i in
range(0, len(load_dict[str(countPost - 50)]))}
finalDic = load_dict[str(countPost - 50)][users[user]]
while countPost <= 300:
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
if user in users:
load_dict[str(countPost)][users[user]] = finalDic
else:
load_dict[str(countPost)].append(finalDic)
countPost += 50
with open(scoreFile, 'w') as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore('wordNet')
<|reserved_special_token_1|>
import os
import json
from nltk.corpus import wordnet as wn
from itertools import combinations
myTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car',
'motorcycle']
scorePath = '..\\data\\score'
usersDataFile = '..\\data\\usersData.json'
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
        print('Best-matching senses of the two words (highest similarity)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
return get_similar_words(list(words[0]))
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = '{}\\{}.json'.format(scorePath, model)
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile, 'w') as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic, dump_f)
with open(scoreFile, 'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print(user)
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print(user)
print(timestamp)
print(usersData[user]['data'][timestamp])
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][
timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k
) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[0], k,
firstWordMaxWordSimilarity))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k
) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[1], k,
secondWordMaxWordSimilarity))
maxScore = (firstWordMaxWordSimilarity +
secondWordMaxWordSimilarity) / 2
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print('{} vs {} = {}'.format(s, k,
maxScore))
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
try:
currentImgScoreDic = {t: round(imageScoreDic[t] /
countImages * 100, 3) for t in myTypes}
except:
currentImgScoreDic = {}
                    print('No images yet')
try:
currentVideoScoreDic = {t: round(videoScoreDic[t] /
countVideos * 100, 3) for t in myTypes}
except:
currentVideoScoreDic = {}
                    print('No videos yet')
if user in users:
load_dict[str(countPost)][users[user]]['follower'
] = usersData[user]['followers']
load_dict[str(countPost)][users[user]]['like'] = round(
countLike / countPost, 3)
load_dict[str(countPost)][users[user]]['comment'] = round(
countComment / countPost, 3)
load_dict[str(countPost)][users[user]]['image']['amount'
] = countImages
load_dict[str(countPost)][users[user]]['image']['score'
] = currentImgScoreDic
load_dict[str(countPost)][users[user]]['video']['amount'
] = countVideos
load_dict[str(countPost)][users[user]]['video']['score'
] = currentVideoScoreDic
load_dict[str(countPost)][users[user]]['ERate'] = round(
(countLike / countPost + countComment / countPost) /
usersData[user]['followers'], 5)
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round(countLike / countPost, 3)
new_dic['comment'] = round(countComment / countPost, 3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round((countLike / countPost +
countComment / countPost) / usersData[user][
'followers'], 5)
load_dict[str(countPost)].append(new_dic)
if countPost == 300:
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = {load_dict[str(countPost - 50)][i]['name']: i for i in
range(0, len(load_dict[str(countPost - 50)]))}
finalDic = load_dict[str(countPost - 50)][users[user]]
while countPost <= 300:
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
if user in users:
load_dict[str(countPost)][users[user]] = finalDic
else:
load_dict[str(countPost)].append(finalDic)
countPost += 50
with open(scoreFile, 'w') as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore('wordNet')
<|reserved_special_token_1|>
import os
import json
from nltk.corpus import wordnet as wn
from itertools import combinations  # for enumerating label pair combinations
# Categories to be scored
myTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car', 'motorcycle']
# Where the computed influencer weights are stored
scorePath = "..\\data\\score"
# JSON of influencer post data saved by getUsersData.py; used to compute the scores
usersDataFile = "..\\data\\usersData.json"
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
        labelCom = list(combinations(words, 2))  # all pairwise label combinations
        for i in labelCom:  # each pair produced above
            labelMean1 = wn.synsets(i[0])  # look up each word's synsets
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
                    if j.wup_similarity(k) is not None:  # the similarity can be None, so skip those
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
print("兩個詞的語意獲得最高分(語意相近)")
print("score : {}".format(maxScore))
print("firstWord : {}".format(firstWord))
print("secondWord : {}".format(secondWord))
print("\n")
if type(firstWord) == type('') :
return get_similar_words( list(words[0]) )
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
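# Rough behaviour sketch (assuming the NLTK wordnet corpus has been fetched
# via nltk.download('wordnet')):
#   get_similar_words(['dog', 'cat']) -> the synset pair with the highest
#       Wu-Palmer similarity, e.g. [Synset('dog.n.01'), Synset('cat.n.01')]
#   get_similar_words(['dog'])        -> every synset of 'dog'
# Note: the fallback get_similar_words(list(words[0])) above recurses on the
# word's individual characters; [words[0]] was probably intended.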
def getWordNetScore(model):
new_dic = {}
scoreFile = ("{}\\{}.json".format( scorePath, model ) )
print(scoreFile)
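    # First run for this model: seed the score file with an empty list per 50-post bucket.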
if not os.path.exists(scoreFile):
with open(scoreFile,"w") as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic,dump_f)
with open(scoreFile,'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print( user )
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
        # New account: reset image count and per-category image scores
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
        # New account: reset video count and per-category video scores
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print( user )
print( timestamp )
print( usersData[user]['data'][timestamp] )
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][timestamp]['labels'])
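                # Two synsets back: score each type with the average of the two
                # words' best Wu-Palmer matches; otherwise take the single best
                # match. The flat 0.05 subtraction presumably damps weak matches.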
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0].wup_similarity(k)
print("{} vs {} = {}".format( synsetWords[0], k, firstWordMaxWordSimilarity ))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1].wup_similarity(k)
print("{} vs {} = {}".format( synsetWords[1], k, secondWordMaxWordSimilarity ))
maxScore = (firstWordMaxWordSimilarity+secondWordMaxWordSimilarity)/2
if usersData[user]['data'][timestamp]['is_video']:
                            # print('Video score for {}: {}'.format(t, maxScore))
videoScoreDic[t] += maxScore - 0.05
else:
                            # print('Image score for {}: {}'.format(t, maxScore))
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
                                    # print('candidate {0} vs target {1}: score {2}'.format(s, k, s.wup_similarity(k)))
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print("{} vs {} = {}".format( s, k, maxScore ))
if usersData[user]['data'][timestamp]['is_video']:
                            # print('Video score for {}: {}'.format(t, maxScore))
videoScoreDic[t] += maxScore - 0.05
else:
                            # print('Image score for {}: {}'.format(t, maxScore))
imageScoreDic[t] += maxScore - 0.05
# print('\n')
# print('\n')
# print("{}目前圖片個數 : {}".format(user, countImages))
# print("{}目前在每個分類的總分:".format(user))
# print(imageScoreDic)
# print('\n')
# print("{}目前影片個數 : {}".format(user, countVideos))
# print("{}目前在每個分類的總分:".format(user))
# print("{}目前在每個分類的總分:".format(user))
# print(videoScoreDic)
# print('\n\n')
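            # Every 50 posts, normalise the running scores to percentages and
            # write a checkpoint record into the matching bucket.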
            if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = { load_dict[str(countPost)][i]['name']:i for i in range( 0, len(load_dict[str(countPost)]) ) }
try:
currentImgScoreDic = { t:round(imageScoreDic[t]/countImages*100, 3) for t in myTypes }
                except:
currentImgScoreDic = {}
print("目前沒有圖片")
try:
currentVideoScoreDic = { t:round(videoScoreDic[t]/countVideos*100, 3) for t in myTypes }
                except:
currentVideoScoreDic = {}
print("目前沒有影片")
if user in users:
load_dict[str(countPost)][ users[user] ]['follower'] = usersData[user]['followers']
load_dict[str(countPost)][ users[user] ]['like'] = round( countLike/countPost, 3)
load_dict[str(countPost)][ users[user] ]['comment'] = round(countComment/countPost,3)
load_dict[str(countPost)][ users[user] ]['image']['amount'] = countImages
load_dict[str(countPost)][ users[user] ]['image']['score'] = currentImgScoreDic
load_dict[str(countPost)][ users[user] ]['video']['amount'] = countVideos
load_dict[str(countPost)][ users[user] ]['video']['score'] = currentVideoScoreDic
load_dict[str(countPost)][ users[user] ]['ERate'] = round( ((countLike/countPost)+(countComment/countPost))/usersData[user]['followers'], 5 )
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round( countLike/countPost, 3)
new_dic['comment'] = round(countComment/countPost,3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round( ((countLike/countPost)+(countComment/countPost))/usersData[user]['followers'], 5 )
load_dict[str(countPost)].append( new_dic )
            if countPost == 300:
break
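        # Fewer than 300 posts: snap up to the next 50-post bucket, take the
        # user's record from the last completed bucket, and copy it into every
        # remaining bucket up to 300 (e.g. 137 posts: the 100-post record fills
        # buckets 150-300). A user with under 50 posts would look up a
        # nonexistent '0' bucket here.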
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = { load_dict[str(countPost-50)][i]['name']:i for i in range( 0, len(load_dict[str(countPost-50)]) ) }
finalDic = load_dict[str(countPost-50)][ users[user] ]
while countPost <= 300:
users = { load_dict[str(countPost)][i]['name']:i for i in range( 0, len(load_dict[str(countPost)]) ) }
if user in users:
load_dict[str(countPost)][ users[user] ] = finalDic
else:
load_dict[str(countPost)].append( finalDic )
countPost += 50
with open(scoreFile, "w") as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore("wordNet")
# print( usersData )
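    # The Windows-style relative paths above assume the script is run from its
    # own directory, with a data\ folder alongside that directory.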
|
flexible
|
{
"blob_id": "879482e4df9c3d7f32d9b2a883201ae043e1189f",
"index": 871,
"step-1": "<mask token>\n\n\ndef get_similar_words(words):\n words = [w.lower() for w in words]\n if len(words) > 1:\n maxScore = 0\n firstWord = ''\n secondWord = ''\n labelCom = list(combinations(words, 2))\n for i in labelCom:\n labelMean1 = wn.synsets(i[0])\n labelMean2 = wn.synsets(i[1])\n for j in labelMean1:\n for k in labelMean2:\n if j.wup_similarity(k) is not None:\n if j.wup_similarity(k) > maxScore:\n maxScore = j.wup_similarity(k)\n firstWord = j\n secondWord = k\n print('兩個詞的語意獲得最高分(語意相近)')\n print('score : {}'.format(maxScore))\n print('firstWord : {}'.format(firstWord))\n print('secondWord : {}'.format(secondWord))\n print('\\n')\n if type(firstWord) == type(''):\n return get_similar_words(list(words[0]))\n else:\n print(firstWord, firstWord.definition())\n print(secondWord, secondWord.definition())\n print('\\n')\n return [firstWord, secondWord]\n else:\n synSetList = []\n for i in range(len(words)):\n labelMean1 = wn.synsets(words[i])\n for j in labelMean1:\n synSetList.append(j)\n return synSetList\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(usersDataFile, 'r') as load_f:\n usersData = json.load(load_f)\n\n\ndef get_similar_words(words):\n words = [w.lower() for w in words]\n if len(words) > 1:\n maxScore = 0\n firstWord = ''\n secondWord = ''\n labelCom = list(combinations(words, 2))\n for i in labelCom:\n labelMean1 = wn.synsets(i[0])\n labelMean2 = wn.synsets(i[1])\n for j in labelMean1:\n for k in labelMean2:\n if j.wup_similarity(k) is not None:\n if j.wup_similarity(k) > maxScore:\n maxScore = j.wup_similarity(k)\n firstWord = j\n secondWord = k\n print('兩個詞的語意獲得最高分(語意相近)')\n print('score : {}'.format(maxScore))\n print('firstWord : {}'.format(firstWord))\n print('secondWord : {}'.format(secondWord))\n print('\\n')\n if type(firstWord) == type(''):\n return get_similar_words(list(words[0]))\n else:\n print(firstWord, firstWord.definition())\n print(secondWord, secondWord.definition())\n print('\\n')\n return [firstWord, secondWord]\n else:\n synSetList = []\n for i in range(len(words)):\n labelMean1 = wn.synsets(words[i])\n for j in labelMean1:\n synSetList.append(j)\n return synSetList\n\n\ndef getWordNetScore(model):\n new_dic = {}\n scoreFile = '{}\\\\{}.json'.format(scorePath, model)\n print(scoreFile)\n if not os.path.exists(scoreFile):\n with open(scoreFile, 'w') as dump_f:\n new_dic['50'] = list()\n new_dic['100'] = list()\n new_dic['150'] = list()\n new_dic['200'] = list()\n new_dic['250'] = list()\n new_dic['300'] = list()\n json.dump(new_dic, dump_f)\n with open(scoreFile, 'r') as load_f:\n load_dict = json.load(load_f)\n for user in usersData:\n print('\\n')\n print(user)\n print('\\n')\n countPost = 0\n countLike = 0\n countComment = 0\n imageScoreDic = {}\n videoScoreDic = {}\n countImages = 0\n for t in myTypes:\n imageScoreDic[t] = 0\n countVideos = 0\n for t in myTypes:\n videoScoreDic[t] = 0\n for timestamp in usersData[user]['data']:\n countPost += 1\n countLike += usersData[user]['data'][timestamp]['likes']\n countComment += usersData[user]['data'][timestamp]['comments']\n if usersData[user]['data'][timestamp]['is_video']:\n countVideos += 1\n else:\n countImages += 1\n if 'labels' not in usersData[user]['data'][timestamp]:\n print(user)\n print(timestamp)\n print(usersData[user]['data'][timestamp])\n if len(usersData[user]['data'][timestamp]['labels']) > 0:\n synsetWords = get_similar_words(usersData[user]['data'][\n timestamp]['labels'])\n if len(synsetWords) == 2:\n for t in myTypes:\n standard = wn.synsets(t)\n firstWordMaxWordSimilarity = 0\n secondWordMaxWordSimilarity = 0\n for k in standard:\n if synsetWords[0].wup_similarity(k) is not None:\n if synsetWords[0].wup_similarity(k\n ) > firstWordMaxWordSimilarity:\n firstWordMaxWordSimilarity = synsetWords[0\n ].wup_similarity(k)\n print('{} vs {} = {}'.format(\n synsetWords[0], k,\n firstWordMaxWordSimilarity))\n if synsetWords[1].wup_similarity(k) is not None:\n if synsetWords[1].wup_similarity(k\n ) > secondWordMaxWordSimilarity:\n secondWordMaxWordSimilarity = synsetWords[1\n ].wup_similarity(k)\n print('{} vs {} = {}'.format(\n synsetWords[1], k,\n secondWordMaxWordSimilarity))\n maxScore = (firstWordMaxWordSimilarity +\n secondWordMaxWordSimilarity) / 2\n if usersData[user]['data'][timestamp]['is_video']:\n videoScoreDic[t] += maxScore - 0.05\n else:\n imageScoreDic[t] += maxScore - 0.05\n else:\n for t in myTypes:\n maxScore = 0\n standard = wn.synsets(t)\n for k in standard:\n for s in synsetWords:\n if s.wup_similarity(k) is not None:\n if s.wup_similarity(k) > maxScore:\n maxScore = 
s.wup_similarity(k)\n print('{} vs {} = {}'.format(s, k,\n maxScore))\n if usersData[user]['data'][timestamp]['is_video']:\n videoScoreDic[t] += maxScore - 0.05\n else:\n imageScoreDic[t] += maxScore - 0.05\n if countPost != 0 and countPost % 50 == 0:\n print(countPost)\n users = {load_dict[str(countPost)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost)]))}\n try:\n currentImgScoreDic = {t: round(imageScoreDic[t] /\n countImages * 100, 3) for t in myTypes}\n except:\n currentImgScoreDic = {}\n print('目前沒有圖片')\n try:\n currentVideoScoreDic = {t: round(videoScoreDic[t] /\n countVideos * 100, 3) for t in myTypes}\n except:\n currentVideoScoreDic = {}\n print('目前沒有影片')\n if user in users:\n load_dict[str(countPost)][users[user]]['follower'\n ] = usersData[user]['followers']\n load_dict[str(countPost)][users[user]]['like'] = round(\n countLike / countPost, 3)\n load_dict[str(countPost)][users[user]]['comment'] = round(\n countComment / countPost, 3)\n load_dict[str(countPost)][users[user]]['image']['amount'\n ] = countImages\n load_dict[str(countPost)][users[user]]['image']['score'\n ] = currentImgScoreDic\n load_dict[str(countPost)][users[user]]['video']['amount'\n ] = countVideos\n load_dict[str(countPost)][users[user]]['video']['score'\n ] = currentVideoScoreDic\n load_dict[str(countPost)][users[user]]['ERate'] = round(\n (countLike / countPost + countComment / countPost) /\n usersData[user]['followers'], 5)\n else:\n new_dic = {}\n new_dic['name'] = user\n new_dic['follower'] = usersData[user]['followers']\n new_dic['like'] = round(countLike / countPost, 3)\n new_dic['comment'] = round(countComment / countPost, 3)\n new_dic['image'] = {}\n new_dic['image']['amount'] = countImages\n new_dic['image']['score'] = currentImgScoreDic\n new_dic['video'] = {}\n new_dic['video']['amount'] = countVideos\n new_dic['video']['score'] = currentVideoScoreDic\n new_dic['ERate'] = round((countLike / countPost + \n countComment / countPost) / usersData[user][\n 'followers'], 5)\n load_dict[str(countPost)].append(new_dic)\n if countPost == 300:\n break\n if countPost < 300:\n if countPost > 250:\n countPost = 300\n elif countPost > 200:\n countPost = 250\n elif countPost > 150:\n countPost = 200\n elif countPost > 100:\n countPost = 150\n elif countPost > 50:\n countPost = 100\n else:\n countPost = 50\n users = {load_dict[str(countPost - 50)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost - 50)]))}\n finalDic = load_dict[str(countPost - 50)][users[user]]\n while countPost <= 300:\n users = {load_dict[str(countPost)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost)]))}\n if user in users:\n load_dict[str(countPost)][users[user]] = finalDic\n else:\n load_dict[str(countPost)].append(finalDic)\n countPost += 50\n with open(scoreFile, 'w') as dump_f:\n json.dump(load_dict, dump_f)\n\n\nif __name__ == '__main__':\n getWordNetScore('wordNet')\n",
"step-3": "<mask token>\nmyTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car',\n 'motorcycle']\nscorePath = '..\\\\data\\\\score'\nusersDataFile = '..\\\\data\\\\usersData.json'\nwith open(usersDataFile, 'r') as load_f:\n usersData = json.load(load_f)\n\n\ndef get_similar_words(words):\n words = [w.lower() for w in words]\n if len(words) > 1:\n maxScore = 0\n firstWord = ''\n secondWord = ''\n labelCom = list(combinations(words, 2))\n for i in labelCom:\n labelMean1 = wn.synsets(i[0])\n labelMean2 = wn.synsets(i[1])\n for j in labelMean1:\n for k in labelMean2:\n if j.wup_similarity(k) is not None:\n if j.wup_similarity(k) > maxScore:\n maxScore = j.wup_similarity(k)\n firstWord = j\n secondWord = k\n print('兩個詞的語意獲得最高分(語意相近)')\n print('score : {}'.format(maxScore))\n print('firstWord : {}'.format(firstWord))\n print('secondWord : {}'.format(secondWord))\n print('\\n')\n if type(firstWord) == type(''):\n return get_similar_words(list(words[0]))\n else:\n print(firstWord, firstWord.definition())\n print(secondWord, secondWord.definition())\n print('\\n')\n return [firstWord, secondWord]\n else:\n synSetList = []\n for i in range(len(words)):\n labelMean1 = wn.synsets(words[i])\n for j in labelMean1:\n synSetList.append(j)\n return synSetList\n\n\ndef getWordNetScore(model):\n new_dic = {}\n scoreFile = '{}\\\\{}.json'.format(scorePath, model)\n print(scoreFile)\n if not os.path.exists(scoreFile):\n with open(scoreFile, 'w') as dump_f:\n new_dic['50'] = list()\n new_dic['100'] = list()\n new_dic['150'] = list()\n new_dic['200'] = list()\n new_dic['250'] = list()\n new_dic['300'] = list()\n json.dump(new_dic, dump_f)\n with open(scoreFile, 'r') as load_f:\n load_dict = json.load(load_f)\n for user in usersData:\n print('\\n')\n print(user)\n print('\\n')\n countPost = 0\n countLike = 0\n countComment = 0\n imageScoreDic = {}\n videoScoreDic = {}\n countImages = 0\n for t in myTypes:\n imageScoreDic[t] = 0\n countVideos = 0\n for t in myTypes:\n videoScoreDic[t] = 0\n for timestamp in usersData[user]['data']:\n countPost += 1\n countLike += usersData[user]['data'][timestamp]['likes']\n countComment += usersData[user]['data'][timestamp]['comments']\n if usersData[user]['data'][timestamp]['is_video']:\n countVideos += 1\n else:\n countImages += 1\n if 'labels' not in usersData[user]['data'][timestamp]:\n print(user)\n print(timestamp)\n print(usersData[user]['data'][timestamp])\n if len(usersData[user]['data'][timestamp]['labels']) > 0:\n synsetWords = get_similar_words(usersData[user]['data'][\n timestamp]['labels'])\n if len(synsetWords) == 2:\n for t in myTypes:\n standard = wn.synsets(t)\n firstWordMaxWordSimilarity = 0\n secondWordMaxWordSimilarity = 0\n for k in standard:\n if synsetWords[0].wup_similarity(k) is not None:\n if synsetWords[0].wup_similarity(k\n ) > firstWordMaxWordSimilarity:\n firstWordMaxWordSimilarity = synsetWords[0\n ].wup_similarity(k)\n print('{} vs {} = {}'.format(\n synsetWords[0], k,\n firstWordMaxWordSimilarity))\n if synsetWords[1].wup_similarity(k) is not None:\n if synsetWords[1].wup_similarity(k\n ) > secondWordMaxWordSimilarity:\n secondWordMaxWordSimilarity = synsetWords[1\n ].wup_similarity(k)\n print('{} vs {} = {}'.format(\n synsetWords[1], k,\n secondWordMaxWordSimilarity))\n maxScore = (firstWordMaxWordSimilarity +\n secondWordMaxWordSimilarity) / 2\n if usersData[user]['data'][timestamp]['is_video']:\n videoScoreDic[t] += maxScore - 0.05\n else:\n imageScoreDic[t] += maxScore - 0.05\n else:\n for t in myTypes:\n maxScore = 0\n 
standard = wn.synsets(t)\n for k in standard:\n for s in synsetWords:\n if s.wup_similarity(k) is not None:\n if s.wup_similarity(k) > maxScore:\n maxScore = s.wup_similarity(k)\n print('{} vs {} = {}'.format(s, k,\n maxScore))\n if usersData[user]['data'][timestamp]['is_video']:\n videoScoreDic[t] += maxScore - 0.05\n else:\n imageScoreDic[t] += maxScore - 0.05\n if countPost != 0 and countPost % 50 == 0:\n print(countPost)\n users = {load_dict[str(countPost)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost)]))}\n try:\n currentImgScoreDic = {t: round(imageScoreDic[t] /\n countImages * 100, 3) for t in myTypes}\n except:\n currentImgScoreDic = {}\n print('目前沒有圖片')\n try:\n currentVideoScoreDic = {t: round(videoScoreDic[t] /\n countVideos * 100, 3) for t in myTypes}\n except:\n currentVideoScoreDic = {}\n print('目前沒有影片')\n if user in users:\n load_dict[str(countPost)][users[user]]['follower'\n ] = usersData[user]['followers']\n load_dict[str(countPost)][users[user]]['like'] = round(\n countLike / countPost, 3)\n load_dict[str(countPost)][users[user]]['comment'] = round(\n countComment / countPost, 3)\n load_dict[str(countPost)][users[user]]['image']['amount'\n ] = countImages\n load_dict[str(countPost)][users[user]]['image']['score'\n ] = currentImgScoreDic\n load_dict[str(countPost)][users[user]]['video']['amount'\n ] = countVideos\n load_dict[str(countPost)][users[user]]['video']['score'\n ] = currentVideoScoreDic\n load_dict[str(countPost)][users[user]]['ERate'] = round(\n (countLike / countPost + countComment / countPost) /\n usersData[user]['followers'], 5)\n else:\n new_dic = {}\n new_dic['name'] = user\n new_dic['follower'] = usersData[user]['followers']\n new_dic['like'] = round(countLike / countPost, 3)\n new_dic['comment'] = round(countComment / countPost, 3)\n new_dic['image'] = {}\n new_dic['image']['amount'] = countImages\n new_dic['image']['score'] = currentImgScoreDic\n new_dic['video'] = {}\n new_dic['video']['amount'] = countVideos\n new_dic['video']['score'] = currentVideoScoreDic\n new_dic['ERate'] = round((countLike / countPost + \n countComment / countPost) / usersData[user][\n 'followers'], 5)\n load_dict[str(countPost)].append(new_dic)\n if countPost == 300:\n break\n if countPost < 300:\n if countPost > 250:\n countPost = 300\n elif countPost > 200:\n countPost = 250\n elif countPost > 150:\n countPost = 200\n elif countPost > 100:\n countPost = 150\n elif countPost > 50:\n countPost = 100\n else:\n countPost = 50\n users = {load_dict[str(countPost - 50)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost - 50)]))}\n finalDic = load_dict[str(countPost - 50)][users[user]]\n while countPost <= 300:\n users = {load_dict[str(countPost)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost)]))}\n if user in users:\n load_dict[str(countPost)][users[user]] = finalDic\n else:\n load_dict[str(countPost)].append(finalDic)\n countPost += 50\n with open(scoreFile, 'w') as dump_f:\n json.dump(load_dict, dump_f)\n\n\nif __name__ == '__main__':\n getWordNetScore('wordNet')\n",
"step-4": "import os\nimport json\nfrom nltk.corpus import wordnet as wn\nfrom itertools import combinations\nmyTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car',\n 'motorcycle']\nscorePath = '..\\\\data\\\\score'\nusersDataFile = '..\\\\data\\\\usersData.json'\nwith open(usersDataFile, 'r') as load_f:\n usersData = json.load(load_f)\n\n\ndef get_similar_words(words):\n words = [w.lower() for w in words]\n if len(words) > 1:\n maxScore = 0\n firstWord = ''\n secondWord = ''\n labelCom = list(combinations(words, 2))\n for i in labelCom:\n labelMean1 = wn.synsets(i[0])\n labelMean2 = wn.synsets(i[1])\n for j in labelMean1:\n for k in labelMean2:\n if j.wup_similarity(k) is not None:\n if j.wup_similarity(k) > maxScore:\n maxScore = j.wup_similarity(k)\n firstWord = j\n secondWord = k\n print('兩個詞的語意獲得最高分(語意相近)')\n print('score : {}'.format(maxScore))\n print('firstWord : {}'.format(firstWord))\n print('secondWord : {}'.format(secondWord))\n print('\\n')\n if type(firstWord) == type(''):\n return get_similar_words(list(words[0]))\n else:\n print(firstWord, firstWord.definition())\n print(secondWord, secondWord.definition())\n print('\\n')\n return [firstWord, secondWord]\n else:\n synSetList = []\n for i in range(len(words)):\n labelMean1 = wn.synsets(words[i])\n for j in labelMean1:\n synSetList.append(j)\n return synSetList\n\n\ndef getWordNetScore(model):\n new_dic = {}\n scoreFile = '{}\\\\{}.json'.format(scorePath, model)\n print(scoreFile)\n if not os.path.exists(scoreFile):\n with open(scoreFile, 'w') as dump_f:\n new_dic['50'] = list()\n new_dic['100'] = list()\n new_dic['150'] = list()\n new_dic['200'] = list()\n new_dic['250'] = list()\n new_dic['300'] = list()\n json.dump(new_dic, dump_f)\n with open(scoreFile, 'r') as load_f:\n load_dict = json.load(load_f)\n for user in usersData:\n print('\\n')\n print(user)\n print('\\n')\n countPost = 0\n countLike = 0\n countComment = 0\n imageScoreDic = {}\n videoScoreDic = {}\n countImages = 0\n for t in myTypes:\n imageScoreDic[t] = 0\n countVideos = 0\n for t in myTypes:\n videoScoreDic[t] = 0\n for timestamp in usersData[user]['data']:\n countPost += 1\n countLike += usersData[user]['data'][timestamp]['likes']\n countComment += usersData[user]['data'][timestamp]['comments']\n if usersData[user]['data'][timestamp]['is_video']:\n countVideos += 1\n else:\n countImages += 1\n if 'labels' not in usersData[user]['data'][timestamp]:\n print(user)\n print(timestamp)\n print(usersData[user]['data'][timestamp])\n if len(usersData[user]['data'][timestamp]['labels']) > 0:\n synsetWords = get_similar_words(usersData[user]['data'][\n timestamp]['labels'])\n if len(synsetWords) == 2:\n for t in myTypes:\n standard = wn.synsets(t)\n firstWordMaxWordSimilarity = 0\n secondWordMaxWordSimilarity = 0\n for k in standard:\n if synsetWords[0].wup_similarity(k) is not None:\n if synsetWords[0].wup_similarity(k\n ) > firstWordMaxWordSimilarity:\n firstWordMaxWordSimilarity = synsetWords[0\n ].wup_similarity(k)\n print('{} vs {} = {}'.format(\n synsetWords[0], k,\n firstWordMaxWordSimilarity))\n if synsetWords[1].wup_similarity(k) is not None:\n if synsetWords[1].wup_similarity(k\n ) > secondWordMaxWordSimilarity:\n secondWordMaxWordSimilarity = synsetWords[1\n ].wup_similarity(k)\n print('{} vs {} = {}'.format(\n synsetWords[1], k,\n secondWordMaxWordSimilarity))\n maxScore = (firstWordMaxWordSimilarity +\n secondWordMaxWordSimilarity) / 2\n if usersData[user]['data'][timestamp]['is_video']:\n videoScoreDic[t] += maxScore - 0.05\n else:\n 
imageScoreDic[t] += maxScore - 0.05\n else:\n for t in myTypes:\n maxScore = 0\n standard = wn.synsets(t)\n for k in standard:\n for s in synsetWords:\n if s.wup_similarity(k) is not None:\n if s.wup_similarity(k) > maxScore:\n maxScore = s.wup_similarity(k)\n print('{} vs {} = {}'.format(s, k,\n maxScore))\n if usersData[user]['data'][timestamp]['is_video']:\n videoScoreDic[t] += maxScore - 0.05\n else:\n imageScoreDic[t] += maxScore - 0.05\n if countPost != 0 and countPost % 50 == 0:\n print(countPost)\n users = {load_dict[str(countPost)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost)]))}\n try:\n currentImgScoreDic = {t: round(imageScoreDic[t] /\n countImages * 100, 3) for t in myTypes}\n except:\n currentImgScoreDic = {}\n print('目前沒有圖片')\n try:\n currentVideoScoreDic = {t: round(videoScoreDic[t] /\n countVideos * 100, 3) for t in myTypes}\n except:\n currentVideoScoreDic = {}\n print('目前沒有影片')\n if user in users:\n load_dict[str(countPost)][users[user]]['follower'\n ] = usersData[user]['followers']\n load_dict[str(countPost)][users[user]]['like'] = round(\n countLike / countPost, 3)\n load_dict[str(countPost)][users[user]]['comment'] = round(\n countComment / countPost, 3)\n load_dict[str(countPost)][users[user]]['image']['amount'\n ] = countImages\n load_dict[str(countPost)][users[user]]['image']['score'\n ] = currentImgScoreDic\n load_dict[str(countPost)][users[user]]['video']['amount'\n ] = countVideos\n load_dict[str(countPost)][users[user]]['video']['score'\n ] = currentVideoScoreDic\n load_dict[str(countPost)][users[user]]['ERate'] = round(\n (countLike / countPost + countComment / countPost) /\n usersData[user]['followers'], 5)\n else:\n new_dic = {}\n new_dic['name'] = user\n new_dic['follower'] = usersData[user]['followers']\n new_dic['like'] = round(countLike / countPost, 3)\n new_dic['comment'] = round(countComment / countPost, 3)\n new_dic['image'] = {}\n new_dic['image']['amount'] = countImages\n new_dic['image']['score'] = currentImgScoreDic\n new_dic['video'] = {}\n new_dic['video']['amount'] = countVideos\n new_dic['video']['score'] = currentVideoScoreDic\n new_dic['ERate'] = round((countLike / countPost + \n countComment / countPost) / usersData[user][\n 'followers'], 5)\n load_dict[str(countPost)].append(new_dic)\n if countPost == 300:\n break\n if countPost < 300:\n if countPost > 250:\n countPost = 300\n elif countPost > 200:\n countPost = 250\n elif countPost > 150:\n countPost = 200\n elif countPost > 100:\n countPost = 150\n elif countPost > 50:\n countPost = 100\n else:\n countPost = 50\n users = {load_dict[str(countPost - 50)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost - 50)]))}\n finalDic = load_dict[str(countPost - 50)][users[user]]\n while countPost <= 300:\n users = {load_dict[str(countPost)][i]['name']: i for i in\n range(0, len(load_dict[str(countPost)]))}\n if user in users:\n load_dict[str(countPost)][users[user]] = finalDic\n else:\n load_dict[str(countPost)].append(finalDic)\n countPost += 50\n with open(scoreFile, 'w') as dump_f:\n json.dump(load_dict, dump_f)\n\n\nif __name__ == '__main__':\n getWordNetScore('wordNet')\n",
"step-5": "import os\nimport json\nfrom nltk.corpus import wordnet as wn\nfrom itertools import combinations #計算排列組合 \n\n\n# 需要被計算的分類\nmyTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car', 'motorcycle']\n\n# 計算完網紅權重存放的位置\nscorePath = \"..\\\\data\\\\score\"\n\n# getUsersData.py儲存網紅貼文資料的json檔案,拿來計算分數\nusersDataFile = \"..\\\\data\\\\usersData.json\"\nwith open(usersDataFile, 'r') as load_f:\n usersData = json.load(load_f)\n\n\ndef get_similar_words(words):\n\n words = [w.lower() for w in words]\n\n if len(words) > 1:\n maxScore = 0\n firstWord = ''\n secondWord = ''\n\n labelCom = list(combinations(words, 2)) #計算所有label內的排列組合\n for i in labelCom: #labelCom 為排列組合的結果\n labelMean1 = wn.synsets(i[0])#取出每個計算詞的詞性\n labelMean2 = wn.synsets(i[1])\n\n for j in labelMean1:\n for k in labelMean2:\n if j.wup_similarity(k) is not None:#因有可能出現計算結果為None的狀況 所以需要排除\n if j.wup_similarity(k) > maxScore:\n maxScore = j.wup_similarity(k)\n firstWord = j\n secondWord = k\n\n print(\"兩個詞的語意獲得最高分(語意相近)\")\n print(\"score : {}\".format(maxScore))\n print(\"firstWord : {}\".format(firstWord))\n print(\"secondWord : {}\".format(secondWord))\n print(\"\\n\")\n\n if type(firstWord) == type('') :\n return get_similar_words( list(words[0]) )\n else:\n print(firstWord, firstWord.definition())\n print(secondWord, secondWord.definition())\n print('\\n')\n return [firstWord, secondWord]\n\n else:\n synSetList = []\n for i in range(len(words)):\n labelMean1 = wn.synsets(words[i])\n for j in labelMean1:\n synSetList.append(j)\n\n return synSetList\n\n\ndef getWordNetScore(model):\n \n new_dic = {}\n scoreFile = (\"{}\\\\{}.json\".format( scorePath, model ) )\n print(scoreFile)\n if not os.path.exists(scoreFile):\n with open(scoreFile,\"w\") as dump_f:\n new_dic['50'] = list()\n new_dic['100'] = list()\n new_dic['150'] = list()\n new_dic['200'] = list()\n new_dic['250'] = list()\n new_dic['300'] = list()\n json.dump(new_dic,dump_f)\n \n with open(scoreFile,'r') as load_f:\n load_dict = json.load(load_f)\n\n for user in usersData:\n print('\\n')\n print( user )\n print('\\n')\n countPost = 0\n countLike = 0\n countComment = 0\n imageScoreDic = {}\n videoScoreDic = {}\n \n # 換帳號,圖片分類分數初始化\n countImages = 0\n for t in myTypes:\n imageScoreDic[t] = 0\n\n # 換帳號,影片分類分數初始化\n countVideos = 0\n for t in myTypes:\n videoScoreDic[t] = 0\n\n\n for timestamp in usersData[user]['data']:\n \n countPost += 1\n countLike += usersData[user]['data'][timestamp]['likes']\n countComment += usersData[user]['data'][timestamp]['comments']\n \n if usersData[user]['data'][timestamp]['is_video']:\n countVideos += 1\n else:\n countImages += 1\n\n if 'labels' not in usersData[user]['data'][timestamp]:\n print( user )\n print( timestamp )\n print( usersData[user]['data'][timestamp] )\n\n if len(usersData[user]['data'][timestamp]['labels']) > 0:\n\n synsetWords = get_similar_words(usersData[user]['data'][timestamp]['labels'])\n\n if len(synsetWords) == 2:\n for t in myTypes:\n standard = wn.synsets(t)\n firstWordMaxWordSimilarity = 0\n secondWordMaxWordSimilarity = 0\n \n for k in standard:\n if synsetWords[0].wup_similarity(k) is not None:\n if synsetWords[0].wup_similarity(k) > firstWordMaxWordSimilarity:\n firstWordMaxWordSimilarity = synsetWords[0].wup_similarity(k)\n print(\"{} vs {} = {}\".format( synsetWords[0], k, firstWordMaxWordSimilarity ))\n \n if synsetWords[1].wup_similarity(k) is not None:\n if synsetWords[1].wup_similarity(k) > secondWordMaxWordSimilarity:\n secondWordMaxWordSimilarity = synsetWords[1].wup_similarity(k)\n 
print(\"{} vs {} = {}\".format( synsetWords[1], k, secondWordMaxWordSimilarity ))\n \n maxScore = (firstWordMaxWordSimilarity+secondWordMaxWordSimilarity)/2\n if usersData[user]['data'][timestamp]['is_video']:\n # print( '這部影片在{}獲得{}分'.format(t, maxScore) )\n videoScoreDic[t] += maxScore - 0.05 \n else:\n # print( '這張圖片在{}獲得{}分'.format(t, maxScore) )\n imageScoreDic[t] += maxScore - 0.05\n else:\n\n for t in myTypes:\n maxScore = 0\n standard = wn.synsets(t)\n\n for k in standard:\n for s in synsetWords:\n if s.wup_similarity(k) is not None:\n #print('{0}為計算詞性,{1}為目標詞性,分數為:{2}'.format(j,k,j.wup_similarity(k)))\n if s.wup_similarity(k) > maxScore:\n maxScore = s.wup_similarity(k)\n print(\"{} vs {} = {}\".format( s, k, maxScore ))\n \n if usersData[user]['data'][timestamp]['is_video']:\n # print( '這部影片在{}獲得{}分'.format(t, maxScore) )\n videoScoreDic[t] += maxScore - 0.05 \n else:\n # print( '這張圖片在{}獲得{}分'.format(t, maxScore) )\n imageScoreDic[t] += maxScore - 0.05\n \n # print('\\n') \n \n \n # print('\\n')\n # print(\"{}目前圖片個數 : {}\".format(user, countImages))\n # print(\"{}目前在每個分類的總分:\".format(user))\n # print(imageScoreDic)\n # print('\\n')\n # print(\"{}目前影片個數 : {}\".format(user, countVideos))\n # print(\"{}目前在每個分類的總分:\".format(user))\n # print(\"{}目前在每個分類的總分:\".format(user))\n # print(videoScoreDic)\n # print('\\n\\n')\n\n if countPost != 0 and countPost % 50 == 0 :\n print(countPost)\n users = { load_dict[str(countPost)][i]['name']:i for i in range( 0, len(load_dict[str(countPost)]) ) }\n try:\n currentImgScoreDic = { t:round(imageScoreDic[t]/countImages*100, 3) for t in myTypes }\n except :\n currentImgScoreDic = {}\n print(\"目前沒有圖片\")\n try:\n currentVideoScoreDic = { t:round(videoScoreDic[t]/countVideos*100, 3) for t in myTypes }\n except :\n currentVideoScoreDic = {}\n print(\"目前沒有影片\")\n \n if user in users:\n load_dict[str(countPost)][ users[user] ]['follower'] = usersData[user]['followers']\n load_dict[str(countPost)][ users[user] ]['like'] = round( countLike/countPost, 3)\n load_dict[str(countPost)][ users[user] ]['comment'] = round(countComment/countPost,3)\n load_dict[str(countPost)][ users[user] ]['image']['amount'] = countImages\n load_dict[str(countPost)][ users[user] ]['image']['score'] = currentImgScoreDic\n load_dict[str(countPost)][ users[user] ]['video']['amount'] = countVideos\n load_dict[str(countPost)][ users[user] ]['video']['score'] = currentVideoScoreDic\n load_dict[str(countPost)][ users[user] ]['ERate'] = round( ((countLike/countPost)+(countComment/countPost))/usersData[user]['followers'], 5 )\n else:\n new_dic = {}\n new_dic['name'] = user\n new_dic['follower'] = usersData[user]['followers']\n new_dic['like'] = round( countLike/countPost, 3)\n new_dic['comment'] = round(countComment/countPost,3)\n new_dic['image'] = {}\n new_dic['image']['amount'] = countImages\n new_dic['image']['score'] = currentImgScoreDic\n new_dic['video'] = {}\n new_dic['video']['amount'] = countVideos\n new_dic['video']['score'] = currentVideoScoreDic\n new_dic['ERate'] = round( ((countLike/countPost)+(countComment/countPost))/usersData[user]['followers'], 5 )\n\n load_dict[str(countPost)].append( new_dic )\n \n if( countPost == 300 ):\n break\n\n if countPost < 300:\n \n if countPost > 250:\n countPost = 300\n elif countPost > 200:\n countPost = 250\n elif countPost > 150:\n countPost = 200\n elif countPost > 100:\n countPost = 150\n elif countPost > 50:\n countPost = 100\n else:\n countPost = 50\n \n users = { load_dict[str(countPost-50)][i]['name']:i for i in range( 0, 
len(load_dict[str(countPost-50)]) ) }\n finalDic = load_dict[str(countPost-50)][ users[user] ]\n while countPost <= 300:\n users = { load_dict[str(countPost)][i]['name']:i for i in range( 0, len(load_dict[str(countPost)]) ) }\n if user in users:\n load_dict[str(countPost)][ users[user] ] = finalDic\n else:\n load_dict[str(countPost)].append( finalDic )\n \n countPost += 50\n \n with open(scoreFile, \"w\") as dump_f:\n json.dump(load_dict, dump_f)\n\n\nif __name__ == '__main__':\n\n getWordNetScore(\"wordNet\")\n\n # print( usersData )\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
hostname = 'forecast.derivacloud.org'
catalog_id = '5'
model = Model.from_catalog(DerivaServer('https', hostname, credentials=
get_credential(hostname)).connect_ermrest(catalog_id))
tabname = model.schemas['ETAS'].tables['Forecast']
print('Before Adding Column')
for column in tabname.column_definitions:
print(column.name, column.type.typename, column.nullok)
"""
Define a series of column names that reflect metadata we expect to extract from
the ETAS directory names. These are initial names, defined by developers.
ETAS modelers may want to rename these columns to be more meaningful to domain experts.
For this first version, all fields are defined as free text.
Redefinition of these values as controlled vocabularies are a future refinement.
1) Sim_Start_Time: Enumeration List
e.g: "2019_07_16"
not null
2) Catalog_Mag: Enumeration List
e.g.: "ComCatM7p1"
not null
3) Event_ID: Enumeration List
e.g.: "ci39457511"
not null
4) Post_Event_Date: Enumeration List
e.g.: "7DaysAfter"
maybe null
5) Rupture_Def: Enumeration List
e.g. "ShakeMapSurfaces"
"ShakeMapSurfaces-noSpont-full_td-scale1.14"
not null
"""
tabname.create_column(Column.define('Sim_Start_Time', builtin_types.
text, comment='Simulation Start Time'))
tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,
comment='Catalog Name and Event Magnitude'))
tabname.create_column(Column.define('Event_ID', builtin_types.text,
comment='Earthquake Event ID'))
tabname.create_column(Column.define('Post_Event_Date', builtin_types.
text, comment='Days Forecast made after Mainshock'))
tabname.create_column(Column.define('Rupture_Definition', builtin_types
.text, comment='Type of Rupture used in ETAS forecast'))
print('After Adding Column')
etas_model = model.schemas['ETAS']
tabname = etas_model.tables['Forecast']
for column in tabname.column_definitions:
print(column.name, column.type.typename, column.nullok)
sys.exit(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import sys
from deriva.core import DerivaServer, ErmrestCatalog, get_credential
from deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag
if __name__ == '__main__':
hostname = 'forecast.derivacloud.org'
catalog_id = '5'
model = Model.from_catalog(DerivaServer('https', hostname, credentials=
get_credential(hostname)).connect_ermrest(catalog_id))
tabname = model.schemas['ETAS'].tables['Forecast']
print('Before Adding Column')
for column in tabname.column_definitions:
print(column.name, column.type.typename, column.nullok)
"""
Define a series of column names that reflect metadata we expect to extract from
the ETAS directory names. These are initial names, defined by developers.
ETAS modelers may want to rename these columns to be more meaningful to domain experts.
For this first version, all fields are defined as free text.
Redefinition of these values as controlled vocabularies are a future refinement.
1) Sim_Start_Time: Enumeration List
e.g: "2019_07_16"
not null
2) Catalog_Mag: Enumeration List
e.g.: "ComCatM7p1"
not null
3) Event_ID: Enumeration List
e.g.: "ci39457511"
not null
4) Post_Event_Date: Enumeration List
e.g.: "7DaysAfter"
maybe null
5) Rupture_Def: Enumeration List
e.g. "ShakeMapSurfaces"
"ShakeMapSurfaces-noSpont-full_td-scale1.14"
not null
"""
tabname.create_column(Column.define('Sim_Start_Time', builtin_types.
text, comment='Simulation Start Time'))
tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,
comment='Catalog Name and Event Magnitude'))
tabname.create_column(Column.define('Event_ID', builtin_types.text,
comment='Earthquake Event ID'))
tabname.create_column(Column.define('Post_Event_Date', builtin_types.
text, comment='Days Forecast made after Mainshock'))
tabname.create_column(Column.define('Rupture_Definition', builtin_types
.text, comment='Type of Rupture used in ETAS forecast'))
print('After Adding Column')
etas_model = model.schemas['ETAS']
tabname = etas_model.tables['Forecast']
for column in tabname.column_definitions:
print(column.name, column.type.typename, column.nullok)
sys.exit(0)
<|reserved_special_token_1|>
#!/usr/bin/env python
"""add_columns.py: This script reads an SCEC ETAS forecast directory name
and extracts key fields that are then added as attributes in the SCEC Deriva
schema.
This script is an example of how the ERD used by Deriva is extended as additional
information or metadata is added to the asset descriptions in Deriva.
This must be run after the create_model.py script has been run, because this modifies
the ERD created by that script.
The expectation is this is run once. If it is run a second time, we expect errors
indicating the columns already exist.
Philip Maechling
3 April 2021
"""
import os
import sys
from deriva.core import DerivaServer, ErmrestCatalog, get_credential
from deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag
if __name__ == "__main__":
# Connect to server and catalog ------------------------------------------------------------------#
    hostname = 'forecast.derivacloud.org'  # target Deriva server (previously a throw-away dev host)
catalog_id = '5' # this was a throw-away catalog used to test this script (change to TBD)
model = Model.from_catalog(
DerivaServer('https', hostname, credentials=get_credential(hostname)).connect_ermrest(catalog_id)
)
#
# During testing, exit before any table modifications are done
#
tabname = model.schemas['ETAS'].tables["Forecast"]
print("Before Adding Column")
for column in tabname.column_definitions:
print(column.name,column.type.typename,column.nullok)
"""
Define a series of column names that reflect metadata we expect to extract from
the ETAS directory names. These are initial names, defined by developers.
ETAS modelers may want to rename these columns to be more meaningful to domain experts.
For this first version, all fields are defined as free text.
Redefinition of these values as controlled vocabularies are a future refinement.
1) Sim_Start_Time: Enumeration List
e.g: "2019_07_16"
not null
2) Catalog_Mag: Enumeration List
e.g.: "ComCatM7p1"
not null
3) Event_ID: Enumeration List
e.g.: "ci39457511"
not null
4) Post_Event_Date: Enumeration List
e.g.: "7DaysAfter"
maybe null
5) Rupture_Def: Enumeration List
e.g. "ShakeMapSurfaces"
"ShakeMapSurfaces-noSpont-full_td-scale1.14"
not null
"""
tabname.create_column(Column.define('Sim_Start_Time',
builtin_types.text,
comment="Simulation Start Time"))
tabname.create_column(Column.define('Catalog_Mag',
builtin_types.text,
comment="Catalog Name and Event Magnitude"))
tabname.create_column(Column.define('Event_ID',
builtin_types.text,
comment="Earthquake Event ID"))
tabname.create_column(Column.define('Post_Event_Date',
builtin_types.text,
comment="Days Forecast made after Mainshock"))
tabname.create_column(Column.define('Rupture_Definition',
builtin_types.text,
comment="Type of Rupture used in ETAS forecast"))
# retrieve catalog model again to ensure we reflect latest structural changes
# example shows this, but I'm not sure what it returns
print("After Adding Column")
etas_model = model.schemas['ETAS']
tabname = etas_model.tables["Forecast"]
for column in tabname.column_definitions:
print(column.name,column.type.typename,column.nullok)
sys.exit(0)
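
    # How the new columns might be populated once the metadata has been parsed
    # out of an ETAS directory name (a sketch: the parsing step and the
    # datapath insert are assumptions, not part of this script):
    #
    #   catalog = DerivaServer('https', hostname,
    #                          credentials=get_credential(hostname)).connect_ermrest(catalog_id)
    #   pb = catalog.getPathBuilder()
    #   pb.schemas['ETAS'].tables['Forecast'].insert([{
    #       'Sim_Start_Time': '2019_07_16',
    #       'Catalog_Mag': 'ComCatM7p1',
    #       'Event_ID': 'ci39457511',
    #       'Post_Event_Date': '7DaysAfter',
    #       'Rupture_Definition': 'ShakeMapSurfaces',
    #   }])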
|
flexible
|
{
"blob_id": "a745f72081e06ff3399f9d7f65a30d7eef594689",
"index": 2292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n hostname = 'forecast.derivacloud.org'\n catalog_id = '5'\n model = Model.from_catalog(DerivaServer('https', hostname, credentials=\n get_credential(hostname)).connect_ermrest(catalog_id))\n tabname = model.schemas['ETAS'].tables['Forecast']\n print('Before Adding Column')\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n tabname.create_column(Column.define('Sim_Start_Time', builtin_types.\n text, comment='Simulation Start Time'))\n tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,\n comment='Catalog Name and Event Magnitude'))\n tabname.create_column(Column.define('Event_ID', builtin_types.text,\n comment='Earthquake Event ID'))\n tabname.create_column(Column.define('Post_Event_Date', builtin_types.\n text, comment='Days Forecast made after Mainshock'))\n tabname.create_column(Column.define('Rupture_Definition', builtin_types\n .text, comment='Type of Rupture used in ETAS forecast'))\n print('After Adding Column')\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables['Forecast']\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n sys.exit(0)\n",
"step-3": "<mask token>\nimport os\nimport sys\nfrom deriva.core import DerivaServer, ErmrestCatalog, get_credential\nfrom deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag\nif __name__ == '__main__':\n hostname = 'forecast.derivacloud.org'\n catalog_id = '5'\n model = Model.from_catalog(DerivaServer('https', hostname, credentials=\n get_credential(hostname)).connect_ermrest(catalog_id))\n tabname = model.schemas['ETAS'].tables['Forecast']\n print('Before Adding Column')\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n tabname.create_column(Column.define('Sim_Start_Time', builtin_types.\n text, comment='Simulation Start Time'))\n tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,\n comment='Catalog Name and Event Magnitude'))\n tabname.create_column(Column.define('Event_ID', builtin_types.text,\n comment='Earthquake Event ID'))\n tabname.create_column(Column.define('Post_Event_Date', builtin_types.\n text, comment='Days Forecast made after Mainshock'))\n tabname.create_column(Column.define('Rupture_Definition', builtin_types\n .text, comment='Type of Rupture used in ETAS forecast'))\n print('After Adding Column')\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables['Forecast']\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n sys.exit(0)\n",
"step-4": "#!/usr/bin/env python\n\n\n\"\"\"add_columns.py: This script reads an SCEC ETAS forecast directory name\nand extracts key fields that are then added as attributes in the SCEC Deriva\nschema.\n\n This script is an example of how the ERD used by Deriva is extended as additional\n information or metadata is added to the asset descriptions in Deriva.\n\n This must be run after the create_model.py script has been run, because this modifies\n the ERD created by that script.\n \n The expectation is this is run once. If it is run a second time, we expect errors\n indicating the columns already exist.\n \nPhilip Maechling\n3 April 2021\n\"\"\"\nimport os\nimport sys\nfrom deriva.core import DerivaServer, ErmrestCatalog, get_credential\nfrom deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag\n\nif __name__ == \"__main__\":\n\n # Connect to server and catalog ------------------------------------------------------------------#\n\n hostname = 'forecast.derivacloud.org' # this is a dev server for throw-away work (change to 'forecast.derivacloud.org)\n catalog_id = '5' # this was a throw-away catalog used to test this script (change to TBD)\n\n model = Model.from_catalog(\n DerivaServer('https', hostname, credentials=get_credential(hostname)).connect_ermrest(catalog_id)\n )\n\n #\n # During testing, exit before any table modifications are done\n #\n\n\n tabname = model.schemas['ETAS'].tables[\"Forecast\"]\n print(\"Before Adding Column\")\n for column in tabname.column_definitions:\n print(column.name,column.type.typename,column.nullok)\n\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n\n\n tabname.create_column(Column.define('Sim_Start_Time',\n builtin_types.text,\n comment=\"Simulation Start Time\"))\n\n tabname.create_column(Column.define('Catalog_Mag',\n builtin_types.text,\n comment=\"Catalog Name and Event Magnitude\"))\n\n tabname.create_column(Column.define('Event_ID',\n builtin_types.text,\n comment=\"Earthquake Event ID\"))\n\n tabname.create_column(Column.define('Post_Event_Date',\n builtin_types.text,\n comment=\"Days Forecast made after Mainshock\"))\n\n tabname.create_column(Column.define('Rupture_Definition',\n builtin_types.text,\n comment=\"Type of Rupture used in ETAS forecast\"))\n\n # retrieve catalog model again to ensure we reflect latest structural changes\n # example shows this, but I'm not sure what it returns\n print(\"After Adding Column\")\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables[\"Forecast\"]\n for column in tabname.column_definitions:\n print(column.name,column.type.typename,column.nullok)\n\n sys.exit(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
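The ETAS record above grows the Forecast table one Column.define / create_column pair at a time. The same pattern, written out plainly, might look like the sketch below; the hostname, catalog id, and column list are copied from the record, and it assumes deriva-py and deriva-chisel are installed and the catalog is reachable — an illustration, not the project's canonical script.

from deriva.core import DerivaServer, get_credential
from deriva.chisel import Model, Column, builtin_types

hostname = 'forecast.derivacloud.org'
catalog_id = '5'
model = Model.from_catalog(DerivaServer('https', hostname,
    credentials=get_credential(hostname)).connect_ermrest(catalog_id))
forecast = model.schemas['ETAS'].tables['Forecast']

# (name, comment) pairs taken from the record; all free text for now.
new_columns = [
    ('Sim_Start_Time', 'Simulation Start Time'),
    ('Catalog_Mag', 'Catalog Name and Event Magnitude'),
    ('Event_ID', 'Earthquake Event ID'),
    ('Post_Event_Date', 'Days Forecast made after Mainshock'),
    ('Rupture_Definition', 'Type of Rupture used in ETAS forecast'),
]
for name, comment in new_columns:
    # Each call changes the live catalog; rerunning it for an existing
    # column is expected to raise an error, as the record itself notes.
    forecast.create_column(Column.define(name, builtin_types.text,
                                         comment=comment))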
__version__ = '18.07.0'
|
normal
|
{
"blob_id": "3cac7829cf0c07ddc704a25ec3c781c9510a8e0c",
"index": 3613,
"step-1": "<mask token>\n",
"step-2": "__version__ = '18.07.0'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class Get(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setUp(self):
pass
<|reserved_special_token_0|>
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    def test_getQualificationInfo_fields(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'
] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'
] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Get(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setUp(self):
pass
def test_gettmptoken(self):
url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'
params = {'sysID': '5'}
r = requests.get(url=url, params=params)
print(r.text)
opterTmpToken = r.json().get('data')['opterTmpToken']
Get.TMPTOKEN = opterTmpToken
print(opterTmpToken)
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    def test_getQualificationInfo_fields(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'
] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'
] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Get(unittest.TestCase):
TMPTOKEN = ''
TOKEN = ''
def setUp(self):
pass
def test_gettmptoken(self):
url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'
params = {'sysID': '5'}
r = requests.get(url=url, params=params)
print(r.text)
opterTmpToken = r.json().get('data')['opterTmpToken']
Get.TMPTOKEN = opterTmpToken
print(opterTmpToken)
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    def test_getQualificationInfo_fields(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'
] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'
] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import requests
import unittest
import time
from common import HTMLTestReport
class Get(unittest.TestCase):
TMPTOKEN = ''
TOKEN = ''
def setUp(self):
pass
def test_gettmptoken(self):
url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'
params = {'sysID': '5'}
r = requests.get(url=url, params=params)
print(r.text)
opterTmpToken = r.json().get('data')['opterTmpToken']
Get.TMPTOKEN = opterTmpToken
print(opterTmpToken)
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    def test_getQualificationInfo_fields(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'
] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'
] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
def Run():
suite = unittest.TestSuite()
suite.addTest(Get('test_gettmptoken'))
suite.addTest(Get('test_gettoken'))
    suite.addTest(Get('test_getQualificationInfo'))
    suite.addTest(Get('test_getQualificationInfo_fields'))
now = time.strftime('%Y-%m-%d_%H%M', time.localtime())
filepath = './report/' + now + '.html'
fp = open(filepath, 'wb')
runner = HTMLTestReport.HTMLTestRunner(stream=fp, title='接口自动化测试报告',
tester='白雪')
runner.run(suite)
fp.close()
Run()
<|reserved_special_token_1|>
import requests
import unittest
import time
from common import HTMLTestReport
class Get(unittest.TestCase):
TMPTOKEN = ''
TOKEN = ''
def setUp(self):
pass
    # Fetch a temporary token (opterTmpToken)
def test_gettmptoken(self):
url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'
params = {'sysID': '5'}
r = requests.get(url=url, params=params)
print(r.text)
opterTmpToken = r.json().get('data')['opterTmpToken']
Get.TMPTOKEN = opterTmpToken
print(opterTmpToken)
    # Exchange the temporary token for the formal token (opterToken)
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
    # Fetch teacher qualification info and check that the response message is Success
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken':Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    # Fetch teacher qualification info and verify the returned teacher fields
    def test_getQualificationInfo_fields(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
def Run():
suite = unittest.TestSuite()
    # Tests run in the order they are added to the suite
suite.addTest(Get('test_gettmptoken'))
suite.addTest(Get('test_gettoken'))
    suite.addTest(Get('test_getQualificationInfo'))
    suite.addTest(Get('test_getQualificationInfo_fields'))
now = time.strftime("%Y-%m-%d_%H%M", time.localtime())
    filepath = './report/' + now + '.html'  # where the test report is saved
fp = open(filepath, 'wb')
runner = HTMLTestReport.HTMLTestRunner(
stream=fp,
title='接口自动化测试报告',
tester='白雪'
)
runner.run(suite)
fp.close()
Run()
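A compact variant of the token hand-off the tests above exercise: a temporary token is exchanged for a formal one, which then authorizes the data call. The endpoints, parameters, and response shapes are copied from the tests; their availability is an assumption, and fetch_opter_token is a hypothetical helper name, not part of the suite.

import requests

def fetch_opter_token(session: requests.Session) -> str:
    # Step 1: temporary token, keyed by the calling system's id.
    tmp = session.get('https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken',
                      params={'sysID': '5'}).json()['data']['opterTmpToken']
    # Step 2: exchange it for the formal operator token.
    resp = session.get('https://jdapi.jd100.com/uc/v1/sys/opterToken',
                       params={'opterTmpToken': tmp})
    return resp.json()['data']['opterToken']

with requests.Session() as s:
    token = fetch_opter_token(s)
    info = s.get('https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo',
                 params={'opterToken': token}).json()
    assert info['message'] == 'Success'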
|
flexible
|
{
"blob_id": "773c217f7f76bd82ed3dabf7ae1aba1871f0932f",
"index": 8539,
"step-1": "<mask token>\n\n\nclass Get(unittest.TestCase):\n <mask token>\n <mask token>\n\n def setUp(self):\n pass\n <mask token>\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Get(unittest.TestCase):\n <mask token>\n <mask token>\n\n def setUp(self):\n pass\n\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Get(unittest.TestCase):\n TMPTOKEN = ''\n TOKEN = ''\n\n def setUp(self):\n pass\n\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-4": "import requests\nimport unittest\nimport time\nfrom common import HTMLTestReport\n\n\nclass Get(unittest.TestCase):\n TMPTOKEN = ''\n TOKEN = ''\n\n def setUp(self):\n pass\n\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\ndef Run():\n suite = unittest.TestSuite()\n suite.addTest(Get('test_gettmptoken'))\n suite.addTest(Get('test_gettoken'))\n suite.addTest(Get('test_getQualificationInfo'))\n now = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n filepath = './report/' + now + '.html'\n fp = open(filepath, 'wb')\n runner = HTMLTestReport.HTMLTestRunner(stream=fp, title='接口自动化测试报告',\n tester='白雪')\n runner.run(suite)\n fp.close()\n\n\nRun()\n",
"step-5": "import requests\nimport unittest\nimport time\nfrom common import HTMLTestReport\n\n\nclass Get(unittest.TestCase):\n TMPTOKEN = ''\n TOKEN = ''\n def setUp(self):\n pass\n\n # 获取临时token,opterTmpToken\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n # 获取正式token,opterToken\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n #获取教师资质信息,校验结果是否返回success\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken':Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n # 获取教师资质信息,校验接口返回的老师资质相关信息是否正确\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\ndef Run():\n suite = unittest.TestSuite()\n # 执行顺序是安装加载顺序:先执行test_case2,再执行test_case1\n suite.addTest(Get('test_gettmptoken'))\n suite.addTest(Get('test_gettoken'))\n suite.addTest(Get('test_getQualificationInfo'))\n now = time.strftime(\"%Y-%m-%d_%H%M\", time.localtime())\n filepath = './report/' + now + '.html' # 测试报告存放的位置\n fp = open(filepath, 'wb')\n runner = HTMLTestReport.HTMLTestRunner(\n stream=fp,\n title='接口自动化测试报告',\n tester='白雪'\n )\n runner.run(suite)\n fp.close()\n\nRun()",
"step-ids": [
6,
7,
8,
11,
12
]
}
|
[
6,
7,
8,
11,
12
] |
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,
theme):
print(hangnam_pics[len(missedLetters)])
print('Тема:', theme)
    dashed_word = ''
    for char in secretWord:
        if char in correctLetters:
            dashed_word = dashed_word + char + ' '
        else:
            dashed_word += '_ '
print('Слово на доске: ', dashed_word)
for index in range(len(alfabet)):
if alfabet[index] in correctLetters or alfabet[index] in missedLetters:
alfabet_board += '_ '
else:
alfabet_board = alfabet_board + alfabet[index] + ' '
print('Оставшиеся буквы: ', alfabet_board)
print('Ошибочные буквы: ', end='')
if missedLetters == '':
print(' -', end='')
else:
for letter in missedLetters:
print(letter + ' ', end='')
print()
def getRandomWord(themes):
theme = random.choice(tuple(themes.keys()))
word = random.choice(themes[theme])
word = word.upper()
return theme, word
def getGuess(correctLetters, missedLetters):
while True:
print()
guess = input('Введите букву --> ').upper()
if len(guess) != 1:
print('Пожалуйста, введите одну букву.')
elif guess in correctLetters or guess in missedLetters:
print('Вы уже называли эту букву')
elif guess in ' _' or guess not in alfabet or type(guess) != str:
print('Это не буква. Введите БУКВУ')
else:
break
print()
return guess
def gameFinish(correctLetters, missedLetters, secretWord):
unikLettersInSecretWord = set()
for i in secretWord:
unikLettersInSecretWord.add(i)
if len(correctLetters) == len(unikLettersInSecretWord):
print()
print()
print(
f""" ПОЗДРАВЛЯЕМ!
Вы угадали слово {secretWord} и выиграли игру "ВИСЕЛИЦА"!"""
)
return True
elif len(missedLetters) == 6:
print()
print()
print(
f""" ИГРА ОКОНЧЕНА!
    Вы не угадали слово {secretWord} и проиграли в игру "ВИСЕЛИЦА"!"""
)
return True
else:
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,
theme):
print(hangnam_pics[len(missedLetters)])
print('Тема:', theme)
    dashed_word = ''
    for char in secretWord:
        if char in correctLetters:
            dashed_word = dashed_word + char + ' '
        else:
            dashed_word += '_ '
print('Слово на доске: ', dashed_word)
for index in range(len(alfabet)):
if alfabet[index] in correctLetters or alfabet[index] in missedLetters:
alfabet_board += '_ '
else:
alfabet_board = alfabet_board + alfabet[index] + ' '
print('Оставшиеся буквы: ', alfabet_board)
print('Ошибочные буквы: ', end='')
if missedLetters == '':
print(' -', end='')
else:
for letter in missedLetters:
print(letter + ' ', end='')
print()
def getRandomWord(themes):
theme = random.choice(tuple(themes.keys()))
word = random.choice(themes[theme])
word = word.upper()
return theme, word
def getGuess(correctLetters, missedLetters):
while True:
print()
guess = input('Введите букву --> ').upper()
if len(guess) != 1:
print('Пожалуйста, введите одну букву.')
elif guess in correctLetters or guess in missedLetters:
print('Вы уже называли эту букву')
elif guess in ' _' or guess not in alfabet or type(guess) != str:
print('Это не буква. Введите БУКВУ')
else:
break
print()
return guess
def gameFinish(correctLetters, missedLetters, secretWord):
unikLettersInSecretWord = set()
for i in secretWord:
unikLettersInSecretWord.add(i)
if len(correctLetters) == len(unikLettersInSecretWord):
print()
print()
print(
f""" ПОЗДРАВЛЯЕМ!
Вы угадали слово {secretWord} и выиграли игру "ВИСЕЛИЦА"!"""
)
return True
elif len(missedLetters) == 6:
print()
print()
print(
f""" ИГРА ОКОНЧЕНА!
    Вы не угадали слово {secretWord} и проиграли в игру "ВИСЕЛИЦА"!"""
)
return True
else:
return False
def oneMore():
while True:
print()
answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()
if answer == 'да':
print()
print()
print()
print()
return True
elif answer == 'нет':
return False
else:
print('Ваш ответ не понятен. Попробуем еще раз.')
def mainGame(themes):
missedLetters = ''
correctLetters = ''
alfabet_board = ''
print()
print(
""" Добро пожаловать в игру ВИСЕЛИЦА!
У Вас есть 6 попыток угадать слово по заданной теме.
После каждой неверной попытки к рисунку будет добавляться часть человечка.
Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!
Удачи!
"""
)
print()
input('Нажмите ENTER для старта.')
theme, secretWord = getRandomWord(themes)
while True:
displayBoard(missedLetters, correctLetters, secretWord,
alfabet_board, theme)
        if gameFinish(correctLetters, missedLetters, secretWord):
            if oneMore():
                mainGame(themes)
            break
guess = getGuess(correctLetters, missedLetters)
if guess in secretWord:
print('Такая буква есть в слове!')
correctLetters += guess
time.sleep(2)
else:
print('Такой буквы нет в слове!')
missedLetters += guess
time.sleep(2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,
theme):
print(hangnam_pics[len(missedLetters)])
print('Тема:', theme)
    dashed_word = ''
    for char in secretWord:
        if char in correctLetters:
            dashed_word = dashed_word + char + ' '
        else:
            dashed_word += '_ '
print('Слово на доске: ', dashed_word)
for index in range(len(alfabet)):
if alfabet[index] in correctLetters or alfabet[index] in missedLetters:
alfabet_board += '_ '
else:
alfabet_board = alfabet_board + alfabet[index] + ' '
print('Оставшиеся буквы: ', alfabet_board)
print('Ошибочные буквы: ', end='')
if missedLetters == '':
print(' -', end='')
else:
for letter in missedLetters:
print(letter + ' ', end='')
print()
def getRandomWord(themes):
theme = random.choice(tuple(themes.keys()))
word = random.choice(themes[theme])
word = word.upper()
return theme, word
def getGuess(correctLetters, missedLetters):
while True:
print()
guess = input('Введите букву --> ').upper()
if len(guess) != 1:
print('Пожалуйста, введите одну букву.')
elif guess in correctLetters or guess in missedLetters:
print('Вы уже называли эту букву')
elif guess in ' _' or guess not in alfabet or type(guess) != str:
print('Это не буква. Введите БУКВУ')
else:
break
print()
return guess
def gameFinish(correctLetters, missedLetters, secretWord):
unikLettersInSecretWord = set()
for i in secretWord:
unikLettersInSecretWord.add(i)
if len(correctLetters) == len(unikLettersInSecretWord):
print()
print()
print(
f""" ПОЗДРАВЛЯЕМ!
Вы угадали слово {secretWord} и выиграли игру "ВИСЕЛИЦА"!"""
)
return True
elif len(missedLetters) == 6:
print()
print()
print(
f""" ИГРА ОКОНЧЕНА!
    Вы не угадали слово {secretWord} и проиграли в игру "ВИСЕЛИЦА"!"""
)
return True
else:
return False
def oneMore():
while True:
print()
answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()
if answer == 'да':
print()
print()
print()
print()
return True
elif answer == 'нет':
return False
else:
print('Ваш ответ не понятен. Попробуем еще раз.')
def mainGame(themes):
missedLetters = ''
correctLetters = ''
alfabet_board = ''
print()
print(
""" Добро пожаловать в игру ВИСЕЛИЦА!
У Вас есть 6 попыток угадать слово по заданной теме.
После каждой неверной попытки к рисунку будет добавляться часть человечка.
Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!
Удачи!
"""
)
print()
input('Нажмите ENTER для старта.')
theme, secretWord = getRandomWord(themes)
while True:
displayBoard(missedLetters, correctLetters, secretWord,
alfabet_board, theme)
        if gameFinish(correctLetters, missedLetters, secretWord):
            if oneMore():
                mainGame(themes)
            break
guess = getGuess(correctLetters, missedLetters)
if guess in secretWord:
print('Такая буква есть в слове!')
correctLetters += guess
time.sleep(2)
else:
print('Такой буквы нет в слове!')
missedLetters += guess
time.sleep(2)
<|reserved_special_token_0|>
hangnam_pics = ["""
+---+
|
|
|
===""",
'\n +---+\n O |\n |\n |\n ===',
"""
+---+
O |
| |
|
===""",
'\n +---+\n O |\n |\\ |\n |\n ===',
"""
+---+
O |
/|\\ |
|
===""",
' \n +---+\n O |\n /|\\ |\n / |\n ===',
"""
+---+
O |
/|\\ |
/ \\ |
==="""]
alfabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ë', 'Ж', 'З', 'И', 'Й', 'К', 'Л',
'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ч', 'Ц', 'Ч', 'Ш',
'Щ', 'Ь', 'Ъ', 'Ы', 'Э', 'Ю', 'Я']
goroda = ['Киев', 'Одесса', 'Харьков', 'Львов', 'Николаев', 'Житомир',
'Полтава', 'Чернигов']
zhyvotnye = ['аист', 'акула', 'бабуин', 'баран', 'тритон', 'черепаха',
'ястреб', 'ящерица', 'муравей', 'барсук', 'медведь', 'медоед',
'муравьед', 'панда', 'ленивец']
themes = {'Города Украины': goroda, 'Животные': zhyvotnye}
mainGame(themes)
print()
print(' ВСЕГО ДОБРОГО!')
<|reserved_special_token_1|>
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,
theme):
print(hangnam_pics[len(missedLetters)])
print('Тема:', theme)
    dashed_word = ''
    for char in secretWord:
        if char in correctLetters:
            dashed_word = dashed_word + char + ' '
        else:
            dashed_word += '_ '
print('Слово на доске: ', dashed_word)
for index in range(len(alfabet)):
if alfabet[index] in correctLetters or alfabet[index] in missedLetters:
alfabet_board += '_ '
else:
alfabet_board = alfabet_board + alfabet[index] + ' '
print('Оставшиеся буквы: ', alfabet_board)
print('Ошибочные буквы: ', end='')
if missedLetters == '':
print(' -', end='')
else:
for letter in missedLetters:
print(letter + ' ', end='')
print()
def getRandomWord(themes):
theme = random.choice(tuple(themes.keys()))
word = random.choice(themes[theme])
word = word.upper()
return theme, word
def getGuess(correctLetters, missedLetters):
while True:
print()
guess = input('Введите букву --> ').upper()
if len(guess) != 1:
print('Пожалуйста, введите одну букву.')
elif guess in correctLetters or guess in missedLetters:
print('Вы уже называли эту букву')
elif guess in ' _' or guess not in alfabet or type(guess) != str:
print('Это не буква. Введите БУКВУ')
else:
break
print()
return guess
def gameFinish(correctLetters, missedLetters, secretWord):
unikLettersInSecretWord = set()
for i in secretWord:
unikLettersInSecretWord.add(i)
if len(correctLetters) == len(unikLettersInSecretWord):
print()
print()
print(
f""" ПОЗДРАВЛЯЕМ!
Вы угадали слово {secretWord} и выиграли игру "ВИСЕЛИЦА"!"""
)
return True
elif len(missedLetters) == 6:
print()
print()
print(
f""" ИГРА ОКОНЧЕНА!
    Вы не угадали слово {secretWord} и проиграли в игру "ВИСЕЛИЦА"!"""
)
return True
else:
return False
def oneMore():
while True:
print()
answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()
if answer == 'да':
print()
print()
print()
print()
return True
elif answer == 'нет':
return False
else:
print('Ваш ответ не понятен. Попробуем еще раз.')
def mainGame(themes):
missedLetters = ''
correctLetters = ''
alfabet_board = ''
print()
print(
""" Добро пожаловать в игру ВИСЕЛИЦА!
У Вас есть 6 попыток угадать слово по заданной теме.
После каждой неверной попытки к рисунку будет добавляться часть человечка.
Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!
Удачи!
"""
)
print()
input('Нажмите ENTER для старта.')
theme, secretWord = getRandomWord(themes)
while True:
displayBoard(missedLetters, correctLetters, secretWord,
alfabet_board, theme)
        if gameFinish(correctLetters, missedLetters, secretWord):
            if oneMore():
                mainGame(themes)
            break
guess = getGuess(correctLetters, missedLetters)
if guess in secretWord:
print('Такая буква есть в слове!')
correctLetters += guess
time.sleep(2)
else:
print('Такой буквы нет в слове!')
missedLetters += guess
time.sleep(2)
import random
import time
hangnam_pics = ["""
+---+
|
|
|
===""",
'\n +---+\n O |\n |\n |\n ===',
"""
+---+
O |
| |
|
===""",
'\n +---+\n O |\n |\\ |\n |\n ===',
"""
+---+
O |
/|\\ |
|
===""",
' \n +---+\n O |\n /|\\ |\n / |\n ===',
"""
+---+
O |
/|\\ |
/ \\ |
==="""]
alfabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ë', 'Ж', 'З', 'И', 'Й', 'К', 'Л',
'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ч', 'Ц', 'Ч', 'Ш',
'Щ', 'Ь', 'Ъ', 'Ы', 'Э', 'Ю', 'Я']
goroda = ['Киев', 'Одесса', 'Харьков', 'Львов', 'Николаев', 'Житомир',
'Полтава', 'Чернигов']
zhyvotnye = ['аист', 'акула', 'бабуин', 'баран', 'тритон', 'черепаха',
'ястреб', 'ящерица', 'муравей', 'барсук', 'медведь', 'медоед',
'муравьед', 'панда', 'ленивец']
themes = {'Города Украины': goroda, 'Животные': zhyvotnye}
mainGame(themes)
print()
print(' ВСЕГО ДОБРОГО!')
<|reserved_special_token_1|>
# ----------------------
#
# *** WELCOME TO "HANGMAN" GAME ***
# Let's start programming
#
# ----------------------
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board, theme):
print(hangnam_pics[len(missedLetters)])
print("Тема:", theme)
    # Show the current state of the word being guessed
    dashed_word = ""
    for char in secretWord:
        if char in correctLetters:
            dashed_word = dashed_word + char + " "
        else:
            dashed_word += "_ "
print("Слово на доске: ", dashed_word)
    # Show the remaining letters still available to guess
for index in range (len(alfabet)):
if alfabet[index] in correctLetters or alfabet[index] in missedLetters:
alfabet_board += "_ "
else:
alfabet_board = alfabet_board + alfabet[index] + " "
print("Оставшиеся буквы: ", alfabet_board)
    # Show the list of wrong letters
print("Ошибочные буквы: ", end = "")
if missedLetters == "":
print(" -", end="")
else:
for letter in missedLetters:
print(letter + " ", end="")
print()
def getRandomWord(themes):
theme = random.choice(tuple(themes.keys()))
word = random.choice(themes[theme])
word = word.upper()
return theme, word
def getGuess(correctLetters, missedLetters):
while True:
print()
guess = input("Введите букву --> ").upper()
if len(guess) != 1:
print("Пожалуйста, введите одну букву.")
elif guess in correctLetters or guess in missedLetters:
print("Вы уже называли эту букву")
elif guess in (" _") or guess not in alfabet or type(guess) != str:
print("Это не буква. Введите БУКВУ")
else:
break
print()
return guess
def gameFinish(correctLetters, missedLetters, secretWord):
unikLettersInSecretWord = set()
for i in secretWord:
unikLettersInSecretWord.add(i)
if len(correctLetters) == len(unikLettersInSecretWord):
print()
print()
print(f''' ПОЗДРАВЛЯЕМ!
Вы угадали слово {secretWord} и выиграли игру "ВИСЕЛИЦА"!''')
return True
elif len(missedLetters) == 6:
print()
print()
print(f''' ИГРА ОКОНЧЕНА!
    Вы не угадали слово {secretWord} и проиграли в игру "ВИСЕЛИЦА"!''')
return True
else:
return False
def oneMore():
while True:
print()
answer = input("Хотите сыграть еще раз? Введите да/нет --->").lower()
if answer == "да":
print()
print()
print()
print()
return True
elif answer == "нет":
return False
else:
print("Ваш ответ не понятен. Попробуем еще раз.")
def mainGame(themes):
missedLetters = ""
correctLetters = ""
alfabet_board = ""
print()
print(
''' Добро пожаловать в игру ВИСЕЛИЦА!
У Вас есть 6 попыток угадать слово по заданной теме.
После каждой неверной попытки к рисунку будет добавляться часть человечка.
Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!
Удачи!
''')
print()
input("Нажмите ENTER для старта.")
    # Pick the secret word
theme, secretWord = getRandomWord(themes)
while True:
        # Show the current game state
displayBoard(missedLetters , correctLetters, secretWord, alfabet_board, theme)
        # Check the game result; once a replayed game returns, leave the
        # loop instead of continuing the already-finished one.
        if gameFinish(correctLetters, missedLetters, secretWord):
            if oneMore():
                mainGame(themes)
            break
        # Ask the user for a letter and validate it
guess = getGuess(correctLetters, missedLetters)
        # Match the letter and record it in the corresponding string
if guess in secretWord:
print("Такая буква есть в слове!")
correctLetters += guess
time.sleep(2)
else:
print("Такой буквы нет в слове!")
missedLetters += guess
time.sleep(2)
import random
import time
hangnam_pics = [
'''
+---+
|
|
|
===''',
'''
+---+
O |
|
|
===''',
'''
+---+
O |
| |
|
===''',
'''
+---+
O |
|\ |
|
===''',
'''
+---+
O |
/|\ |
|
===''',
'''
+---+
O |
/|\ |
/ |
===''',
'''
+---+
O |
/|\ |
/ \ |
==='''
]
alfabet = ["А","Б","В","Г","Д","Е","Ë","Ж","З","И","Й","К","Л","М","Н","О","П","Р","С","Т","У","Ф", "Х","Ч","Ц","Ч","Ш","Щ","Ь","Ъ","Ы","Э","Ю","Я"]
goroda = ["Киев", "Одесса", "Харьков", "Львов", "Николаев", "Житомир", "Полтава", "Чернигов"]
zhyvotnye = ["аист","акула","бабуин","баран", "тритон", "черепаха", "ястреб", "ящерица", "муравей","барсук","медведь", "медоед", "муравьед", "панда", "ленивец"]
themes = {"Города Украины": goroda, "Животные": zhyvotnye}
mainGame(themes)
print()
print(" ВСЕГО ДОБРОГО!")
|
flexible
|
{
"blob_id": "720ab0c0fcb40a50d73770e4ada6a78465e9ff96",
"index": 2755,
"step-1": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-2": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\ndef oneMore():\n while True:\n print()\n answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()\n if answer == 'да':\n print()\n print()\n print()\n print()\n return True\n elif answer == 'нет':\n return False\n else:\n print('Ваш ответ не понятен. Попробуем еще раз.')\n\n\ndef mainGame(themes):\n missedLetters = ''\n correctLetters = ''\n alfabet_board = ''\n print()\n print(\n \"\"\" Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n \"\"\"\n )\n print()\n input('Нажмите ENTER для старта.')\n theme, secretWord = getRandomWord(themes)\n while True:\n displayBoard(missedLetters, correctLetters, secretWord,\n alfabet_board, theme)\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n guess = getGuess(correctLetters, missedLetters)\n if guess in secretWord:\n print('Такая буква есть в слове!')\n correctLetters += guess\n time.sleep(2)\n else:\n print('Такой буквы нет в слове!')\n missedLetters += guess\n time.sleep(2)\n\n\n<mask token>\n",
"step-3": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\ndef oneMore():\n while True:\n print()\n answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()\n if answer == 'да':\n print()\n print()\n print()\n print()\n return True\n elif answer == 'нет':\n return False\n else:\n print('Ваш ответ не понятен. 
Попробуем еще раз.')\n\n\ndef mainGame(themes):\n missedLetters = ''\n correctLetters = ''\n alfabet_board = ''\n print()\n print(\n \"\"\" Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n \"\"\"\n )\n print()\n input('Нажмите ENTER для старта.')\n theme, secretWord = getRandomWord(themes)\n while True:\n displayBoard(missedLetters, correctLetters, secretWord,\n alfabet_board, theme)\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n guess = getGuess(correctLetters, missedLetters)\n if guess in secretWord:\n print('Такая буква есть в слове!')\n correctLetters += guess\n time.sleep(2)\n else:\n print('Такой буквы нет в слове!')\n missedLetters += guess\n time.sleep(2)\n\n\n<mask token>\nhangnam_pics = [\"\"\"\n +---+\n |\n |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n | |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\\\ |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n /|\\\\ |\n |\n ===\"\"\",\n ' \\n +---+\\n O |\\n /|\\\\ |\\n / |\\n ===',\n \"\"\" \n +---+\n O |\n /|\\\\ |\n / \\\\ |\n ===\"\"\"]\nalfabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ë', 'Ж', 'З', 'И', 'Й', 'К', 'Л',\n 'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ч', 'Ц', 'Ч', 'Ш',\n 'Щ', 'Ь', 'Ъ', 'Ы', 'Э', 'Ю', 'Я']\ngoroda = ['Киев', 'Одесса', 'Харьков', 'Львов', 'Николаев', 'Житомир',\n 'Полтава', 'Чернигов']\nzhyvotnye = ['аист', 'акула', 'бабуин', 'баран', 'тритон', 'черепаха',\n 'ястреб', 'ящерица', 'муравей', 'барсук', 'медведь', 'медоед',\n 'муравьед', 'панда', 'ленивец']\nthemes = {'Города Украины': goroda, 'Животные': zhyvotnye}\nmainGame(themes)\nprint()\nprint(' ВСЕГО ДОБРОГО!')\n",
"step-4": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\ndef oneMore():\n while True:\n print()\n answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()\n if answer == 'да':\n print()\n print()\n print()\n print()\n return True\n elif answer == 'нет':\n return False\n else:\n print('Ваш ответ не понятен. 
Попробуем еще раз.')\n\n\ndef mainGame(themes):\n missedLetters = ''\n correctLetters = ''\n alfabet_board = ''\n print()\n print(\n \"\"\" Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n \"\"\"\n )\n print()\n input('Нажмите ENTER для старта.')\n theme, secretWord = getRandomWord(themes)\n while True:\n displayBoard(missedLetters, correctLetters, secretWord,\n alfabet_board, theme)\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n guess = getGuess(correctLetters, missedLetters)\n if guess in secretWord:\n print('Такая буква есть в слове!')\n correctLetters += guess\n time.sleep(2)\n else:\n print('Такой буквы нет в слове!')\n missedLetters += guess\n time.sleep(2)\n\n\nimport random\nimport time\nhangnam_pics = [\"\"\"\n +---+\n |\n |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n | |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\\\ |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n /|\\\\ |\n |\n ===\"\"\",\n ' \\n +---+\\n O |\\n /|\\\\ |\\n / |\\n ===',\n \"\"\" \n +---+\n O |\n /|\\\\ |\n / \\\\ |\n ===\"\"\"]\nalfabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ë', 'Ж', 'З', 'И', 'Й', 'К', 'Л',\n 'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ч', 'Ц', 'Ч', 'Ш',\n 'Щ', 'Ь', 'Ъ', 'Ы', 'Э', 'Ю', 'Я']\ngoroda = ['Киев', 'Одесса', 'Харьков', 'Львов', 'Николаев', 'Житомир',\n 'Полтава', 'Чернигов']\nzhyvotnye = ['аист', 'акула', 'бабуин', 'баран', 'тритон', 'черепаха',\n 'ястреб', 'ящерица', 'муравей', 'барсук', 'медведь', 'медоед',\n 'муравьед', 'панда', 'ленивец']\nthemes = {'Города Украины': goroda, 'Животные': zhyvotnye}\nmainGame(themes)\nprint()\nprint(' ВСЕГО ДОБРОГО!')\n",
"step-5": "\n# ----------------------\n#\n# *** WELCOME TO \"HANGMAN\" GAME ***\n# Let's start programming\n#\n# ----------------------\n\n\n\ndef displayBoard(missedLetters, correctLetters, secretWord, alfabet_board, theme):\n print(hangnam_pics[len(missedLetters)])\n print(\"Тема:\", theme)\n\n # Показываем состояние угадываемого слова на сейчас\n for index in range(len(secretWord)):\n dashed_word = \"\"\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + \" \"\n else:\n dashed_word += \"_ \"\n print(\"Слово на доске: \", dashed_word)\n\n\n # Показываем остальные буквы, доступные к угадыванию\n for index in range (len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += \"_ \"\n else:\n alfabet_board = alfabet_board + alfabet[index] + \" \"\n print(\"Оставшиеся буквы: \", alfabet_board)\n\n\n #Показываем список ошибочных букв\n print(\"Ошибочные буквы: \", end = \"\")\n if missedLetters == \"\":\n print(\" -\", end=\"\")\n else:\n for letter in missedLetters:\n print(letter + \" \", end=\"\")\n print()\n\n\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input(\"Введите букву --> \").upper()\n if len(guess) != 1:\n print(\"Пожалуйста, введите одну букву.\")\n elif guess in correctLetters or guess in missedLetters:\n print(\"Вы уже называли эту букву\")\n elif guess in (\" _\") or guess not in alfabet or type(guess) != str:\n print(\"Это не буква. Введите БУКВУ\")\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(f''' ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!''')\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(f''' ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!''')\n return True\n else:\n return False\n\ndef oneMore():\n while True:\n print()\n answer = input(\"Хотите сыграть еще раз? Введите да/нет --->\").lower()\n if answer == \"да\":\n print()\n print()\n print()\n print()\n return True\n elif answer == \"нет\":\n return False\n else:\n print(\"Ваш ответ не понятен. Попробуем еще раз.\")\n\n\n\n\n\n\ndef mainGame(themes):\n missedLetters = \"\"\n correctLetters = \"\"\n alfabet_board = \"\"\n\n print()\n print(\n ''' Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n ''')\n print()\n input(\"Нажмите ENTER для старта.\")\n #Выбираем секретное слово\n theme, secretWord = getRandomWord(themes)\n\n\n while True:\n #Показываем текущее состояние игры\n displayBoard(missedLetters , correctLetters, secretWord, alfabet_board, theme)\n\n #Проверка результатов Игры - пишется последним\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n\n #Запрос пользователю на введение буквы. 
Проверка буквы.\n guess = getGuess(correctLetters, missedLetters)\n\n #Сверка буквы и запись в соответствующий массив\n if guess in secretWord:\n print(\"Такая буква есть в слове!\")\n correctLetters += guess\n time.sleep(2)\n else:\n print(\"Такой буквы нет в слове!\")\n missedLetters += guess\n time.sleep(2)\n\n\n\nimport random\nimport time\n\nhangnam_pics = [\n '''\n +---+\n |\n |\n |\n ===''',\n '''\n +---+\n O |\n |\n |\n ===''',\n '''\n +---+\n O |\n | |\n |\n ===''',\n '''\n +---+\n O |\n |\\ |\n |\n ===''',\n '''\n +---+\n O |\n /|\\ |\n |\n ===''',\n ''' \n +---+\n O |\n /|\\ |\n / |\n ===''',\n ''' \n +---+\n O |\n /|\\ |\n / \\ |\n ==='''\n ]\nalfabet = [\"А\",\"Б\",\"В\",\"Г\",\"Д\",\"Е\",\"Ë\",\"Ж\",\"З\",\"И\",\"Й\",\"К\",\"Л\",\"М\",\"Н\",\"О\",\"П\",\"Р\",\"С\",\"Т\",\"У\",\"Ф\", \"Х\",\"Ч\",\"Ц\",\"Ч\",\"Ш\",\"Щ\",\"Ь\",\"Ъ\",\"Ы\",\"Э\",\"Ю\",\"Я\"]\ngoroda = [\"Киев\", \"Одесса\", \"Харьков\", \"Львов\", \"Николаев\", \"Житомир\", \"Полтава\", \"Чернигов\"]\nzhyvotnye = [\"аист\",\"акула\",\"бабуин\",\"баран\", \"тритон\", \"черепаха\", \"ястреб\", \"ящерица\", \"муравей\",\"барсук\",\"медведь\", \"медоед\", \"муравьед\", \"панда\", \"ленивец\"]\nthemes = {\"Города Украины\": goroda, \"Животные\": zhyvotnye}\n\nmainGame(themes)\nprint()\nprint(\" ВСЕГО ДОБРОГО!\")\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, 4000):
req = requests.get(url)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.text, 'lxml')
comic = soup.select('#main-comic')
comicUrl = 'http:' + comic[0].get('src')
urllib.request.urlretrieve(comicUrl, str(i))
print(str(i) + ' done')
next_comic = soup.select('.next-comic')
url = base_url + next_comic[0].get('href')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'http://explosm.net/comics/39/'
base_url = 'http://explosm.net'
for i in range(1, 4000):
req = requests.get(url)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.text, 'lxml')
comic = soup.select('#main-comic')
comicUrl = 'http:' + comic[0].get('src')
urllib.request.urlretrieve(comicUrl, str(i))
print(str(i) + ' done')
next_comic = soup.select('.next-comic')
url = base_url + next_comic[0].get('href')
<|reserved_special_token_1|>
import urllib.request, bs4, requests
url = 'http://explosm.net/comics/39/'
base_url = 'http://explosm.net'
for i in range(1, 4000):
req = requests.get(url)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.text, 'lxml')
comic = soup.select('#main-comic')
comicUrl = 'http:' + comic[0].get('src')
urllib.request.urlretrieve(comicUrl, str(i))
print(str(i) + ' done')
next_comic = soup.select('.next-comic')
url = base_url + next_comic[0].get('href')
<|reserved_special_token_1|>
# Comic Downloader
#! python3
import urllib.request, bs4, requests  # urllib.request must be imported explicitly in Python 3
url = 'http://explosm.net/comics/39/'
base_url = 'http://explosm.net'
for i in range(1,4000):
req = requests.get(url)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.text, "lxml")
comic = soup.select('#main-comic')
comicUrl = 'http:' + comic[0].get('src')
urllib.request.urlretrieve(comicUrl, str(i))
print(str(i) + ' done')
next_comic = soup.select('.next-comic')
url = base_url + next_comic[0].get('href')
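urlretrieve(comicUrl, str(i)) above saves each image under a bare index with no file extension. One way to keep the site's own file name, reusing the same selector; download_comic is a hypothetical helper, and the page structure (an img with id main-comic) is assumed unchanged.

import os
import bs4
import requests

def download_comic(page_url: str, dest_dir: str = '.') -> str:
    page = requests.get(page_url)
    page.raise_for_status()
    soup = bs4.BeautifulSoup(page.text, 'lxml')
    img_url = 'http:' + soup.select('#main-comic')[0].get('src')
    # Derive the local name from the URL so the extension survives.
    filename = os.path.join(dest_dir, os.path.basename(img_url))
    img = requests.get(img_url)
    img.raise_for_status()
    with open(filename, 'wb') as f:
        f.write(img.content)
    return filename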
|
flexible
|
{
"blob_id": "66e77b8237850a29127402310bfab3061f7ebca4",
"index": 2346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-3": "<mask token>\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-4": "import urllib, bs4, requests\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-5": "# Comic Downloader\n\n#! python3\n\nimport urllib, bs4, requests\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\n\nfor i in range(1,4000):\n\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, \"lxml\")\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def make_album_two(artist_name, album_title, number_of_songs=None):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
if number_of_songs:
music_album['Number of Songs'] = number_of_songs
return music_album
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def make_album(artist_name, album_title):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
return music_album
<|reserved_special_token_0|>
def make_album_two(artist_name, album_title, number_of_songs=None):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
if number_of_songs:
music_album['Number of Songs'] = number_of_songs
return music_album
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def make_album(artist_name, album_title):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
return music_album
print("Here's Part One:")
<|reserved_special_token_0|>
print(cardi)
<|reserved_special_token_0|>
print(jhene)
<|reserved_special_token_0|>
print(lennon)
def make_album_two(artist_name, album_title, number_of_songs=None):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
if number_of_songs:
music_album['Number of Songs'] = number_of_songs
return music_album
print("""
Here's Part Two:""")
<|reserved_special_token_0|>
print(cardi)
<|reserved_special_token_0|>
print(jhene)
<|reserved_special_token_0|>
print(lennon)
<|reserved_special_token_1|>
def make_album(artist_name, album_title):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
return music_album
print("Here's Part One:")
cardi = make_album('cardi b', 'invasion of privacy')
print(cardi)
jhene = make_album('jhene aiko', 'souled out')
print(jhene)
lennon = make_album('lennon stella', 'three. two. one.')
print(lennon)
def make_album_two(artist_name, album_title, number_of_songs=None):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}
if number_of_songs:
music_album['Number of Songs'] = number_of_songs
return music_album
print("""
Here's Part Two:""")
cardi = make_album_two('cardi b', 'invasion of privacy')
print(cardi)
jhene = make_album_two('jhene aiko', 'souled out')
print(jhene)
lennon = make_album_two('lennon stella', 'three. two. one.', 13)
print(lennon)
<|reserved_special_token_1|>
# 8-7. Album: Write a function called make_album() that builds a dictionary
# describing a music album. The function should take in an artist name and an
# album title, and it should return a dictionary containing these two pieces
# of information. Use the function to make three dictionaries representing
# different albums. Print each return value to show that the dictionaries are
# storing the album information correctly. Use None to add an optional
# parameter to make_album() that allows you to store the number of songs on an
# album. If the calling line includes a value for the number of songs, add
# that value to the album’s dictionary. Make at least one new function call
# that includes the number of songs on an album.
# PART ONE
def make_album(artist_name, album_title):
"""Build a dictionary describing a music album"""
music_album = {
'Artist': artist_name.title(),
'Album': album_title.title()
}
return music_album
print("Here's Part One:")
cardi = make_album('cardi b', 'invasion of privacy')
print(cardi)
jhene = make_album('jhene aiko', 'souled out')
print(jhene)
lennon = make_album('lennon stella', 'three. two. one.')
print(lennon)
# PART TWO
def make_album_two(artist_name, album_title, number_of_songs= None):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(),
'Album': album_title.title()}
if number_of_songs:
music_album['Number of Songs'] = number_of_songs
return music_album
print("\nHere's Part Two:")
cardi = make_album_two('cardi b', 'invasion of privacy')
print(cardi)
jhene = make_album_two('jhene aiko', 'souled out')
print(jhene)
lennon = make_album_two('lennon stella', 'three. two. one.', 13)
print(lennon)
|
flexible
|
{
"blob_id": "19888c998e8787533e84413272da1183f16fcdb1",
"index": 2974,
"step-1": "<mask token>\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\n<mask token>\n",
"step-2": "def make_album(artist_name, album_title):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n return music_album\n\n\n<mask token>\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\n<mask token>\n",
"step-3": "def make_album(artist_name, album_title):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n return music_album\n\n\nprint(\"Here's Part One:\")\n<mask token>\nprint(cardi)\n<mask token>\nprint(jhene)\n<mask token>\nprint(lennon)\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\nprint(\"\"\"\nHere's Part Two:\"\"\")\n<mask token>\nprint(cardi)\n<mask token>\nprint(jhene)\n<mask token>\nprint(lennon)\n",
"step-4": "def make_album(artist_name, album_title):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n return music_album\n\n\nprint(\"Here's Part One:\")\ncardi = make_album('cardi b', 'invasion of privacy')\nprint(cardi)\njhene = make_album('jhene aiko', 'souled out')\nprint(jhene)\nlennon = make_album('lennon stella', 'three. two. one.')\nprint(lennon)\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\nprint(\"\"\"\nHere's Part Two:\"\"\")\ncardi = make_album_two('cardi b', 'invasion of privacy')\nprint(cardi)\njhene = make_album_two('jhene aiko', 'souled out')\nprint(jhene)\nlennon = make_album_two('lennon stella', 'three. two. one.', 13)\nprint(lennon)\n",
"step-5": "# 8-7. Album: Write a function called make_album() that builds a dictionary\n# describing a music album. The function should take in an artist name and an\n# album title, and it should return a dictionary containing these two pieces\n# of information. Use the function to make three dictionaries representing\n# different albums. Print each return value to show that the dictionaries are\n# storing the album information correctly. Use None to add an optional\n# parameter to make_album() that allows you to store the number of songs on an\n# album. If the calling line includes a value for the number of songs, add\n# that value to the album’s dictionary. Make at least one new function call\n# that includes the number of songs on an album.\n\n# PART ONE\n\ndef make_album(artist_name, album_title): \n \"\"\"Build a dictionary describing a music album\"\"\" \n music_album = {\n 'Artist': artist_name.title(),\n 'Album': album_title.title()\n }\n return music_album\n\nprint(\"Here's Part One:\")\ncardi = make_album('cardi b', 'invasion of privacy')\nprint(cardi)\n\njhene = make_album('jhene aiko', 'souled out')\nprint(jhene)\n\nlennon = make_album('lennon stella', 'three. two. one.')\nprint(lennon)\n\n# PART TWO\ndef make_album_two(artist_name, album_title, number_of_songs= None): \n \"\"\"Build a dictionary describing a music album\"\"\" \n music_album = {'Artist': artist_name.title(),\n 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\nprint(\"\\nHere's Part Two:\")\ncardi = make_album_two('cardi b', 'invasion of privacy')\nprint(cardi)\n\njhene = make_album_two('jhene aiko', 'souled out')\nprint(jhene)\n\nlennon = make_album_two('lennon stella', 'three. two. one.', 13)\nprint(lennon)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from contextlib import contextmanager
import yaml
from omegaconf import OmegaConf
class CrypTenConfig:
"""
Configuration object used to store configurable parameters for CrypTen.
This object acts as a nested dictionary, but can be queried using dot-notation(
e.g. querying or setting `cfg.a.b` is equivalent to `cfg['a']['b']`).
Users can load a CrypTen config from a file using `cfg.load_config(filepath)`.
Users can temporarily override a config parameter using the contextmanager temp_override:
.. code-block:: python
cfg.a.b = outer # sets cfg["a"]["b"] to outer value
with cfg.temp_override("a.b", inner):
print(cfg.a.b) # prints inner value
print(cfg.a.b) # prints outer value
"""
__DEFAULT_CONFIG_PATH = os.path.normpath(
os.path.join(__file__, "../../../configs/default.yaml")
)
def __init__(self, config_file=None):
self.load_config(config_file)
def load_config(self, config_file):
"""Loads config from a yaml file"""
if config_file is None:
config_file = CrypTenConfig.__DEFAULT_CONFIG_PATH
# Use yaml to open stream for safe load
with open(config_file) as stream:
config_dict = yaml.safe_load(stream)
self.config = OmegaConf.create(config_dict)
def set_config(self, config):
if isinstance(config, CrypTenConfig):
self.config = config.config
else:
self.config = config
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
keys = name.split(".")
result = getattr(self.config, keys[0])
for key in keys[1:]:
result = getattr(result, key)
return result
def __getitem__(self, name):
return self.__getattribute__(name)
def __setattr__(self, name, value):
if name == "config":
object.__setattr__(self, name, value)
try:
# Can only set attribute if already exists
object.__getattribute__(self, name)
object.__setattr__(self, name, value)
except AttributeError:
dotlist = [f"{name}={value}"]
update = OmegaConf.from_dotlist(dotlist)
self.config = OmegaConf.merge(self.config, update)
def __setitem__(self, name, value):
self.__setattr__(name, value)
@contextmanager
def temp_override(self, override_dict):
old_config = self.config
try:
dotlist = [f"{k}={v}" for k, v in override_dict.items()]
update = OmegaConf.from_dotlist(dotlist)
self.config = OmegaConf.merge(self.config, update)
yield
finally:
self.config = old_config
|
normal
|
{
"blob_id": "501ca508df5d72b0190b933f07c4bd505d7090c0",
"index": 6464,
"step-1": "<mask token>\n\n\nclass CrypTenConfig:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @contextmanager\n def temp_override(self, override_dict):\n old_config = self.config\n try:\n dotlist = [f'{k}={v}' for k, v in override_dict.items()]\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n yield\n finally:\n self.config = old_config\n",
"step-2": "<mask token>\n\n\nclass CrypTenConfig:\n <mask token>\n <mask token>\n\n def __init__(self, config_file=None):\n self.load_config(config_file)\n <mask token>\n\n def set_config(self, config):\n if isinstance(config, CrypTenConfig):\n self.config = config.config\n else:\n self.config = config\n\n def __getattribute__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError:\n keys = name.split('.')\n result = getattr(self.config, keys[0])\n for key in keys[1:]:\n result = getattr(result, key)\n return result\n\n def __getitem__(self, name):\n return self.__getattribute__(name)\n <mask token>\n\n def __setitem__(self, name, value):\n self.__setattr__(name, value)\n\n @contextmanager\n def temp_override(self, override_dict):\n old_config = self.config\n try:\n dotlist = [f'{k}={v}' for k, v in override_dict.items()]\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n yield\n finally:\n self.config = old_config\n",
"step-3": "<mask token>\n\n\nclass CrypTenConfig:\n <mask token>\n __DEFAULT_CONFIG_PATH = os.path.normpath(os.path.join(__file__,\n '../../../configs/default.yaml'))\n\n def __init__(self, config_file=None):\n self.load_config(config_file)\n\n def load_config(self, config_file):\n \"\"\"Loads config from a yaml file\"\"\"\n if config_file is None:\n config_file = CrypTenConfig.__DEFAULT_CONFIG_PATH\n with open(config_file) as stream:\n config_dict = yaml.safe_load(stream)\n self.config = OmegaConf.create(config_dict)\n\n def set_config(self, config):\n if isinstance(config, CrypTenConfig):\n self.config = config.config\n else:\n self.config = config\n\n def __getattribute__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError:\n keys = name.split('.')\n result = getattr(self.config, keys[0])\n for key in keys[1:]:\n result = getattr(result, key)\n return result\n\n def __getitem__(self, name):\n return self.__getattribute__(name)\n\n def __setattr__(self, name, value):\n if name == 'config':\n object.__setattr__(self, name, value)\n try:\n object.__getattribute__(self, name)\n object.__setattr__(self, name, value)\n except AttributeError:\n dotlist = [f'{name}={value}']\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n\n def __setitem__(self, name, value):\n self.__setattr__(name, value)\n\n @contextmanager\n def temp_override(self, override_dict):\n old_config = self.config\n try:\n dotlist = [f'{k}={v}' for k, v in override_dict.items()]\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n yield\n finally:\n self.config = old_config\n",
"step-4": "import os\nfrom contextlib import contextmanager\nimport yaml\nfrom omegaconf import OmegaConf\n\n\nclass CrypTenConfig:\n \"\"\"\n Configuration object used to store configurable parameters for CrypTen.\n\n This object acts as a nested dictionary, but can be queried using dot-notation(\n e.g. querying or setting `cfg.a.b` is equivalent to `cfg['a']['b']`).\n\n Users can load a CrypTen config from a file using `cfg.load_config(filepath)`.\n\n Users can temporarily override a config parameter using the contextmanager temp_override:\n\n .. code-block:: python\n\n cfg.a.b = outer # sets cfg[\"a\"][\"b\"] to outer value\n\n with cfg.temp_override(\"a.b\", inner):\n print(cfg.a.b) # prints inner value\n\n print(cfg.a.b) # prints outer value\n \"\"\"\n __DEFAULT_CONFIG_PATH = os.path.normpath(os.path.join(__file__,\n '../../../configs/default.yaml'))\n\n def __init__(self, config_file=None):\n self.load_config(config_file)\n\n def load_config(self, config_file):\n \"\"\"Loads config from a yaml file\"\"\"\n if config_file is None:\n config_file = CrypTenConfig.__DEFAULT_CONFIG_PATH\n with open(config_file) as stream:\n config_dict = yaml.safe_load(stream)\n self.config = OmegaConf.create(config_dict)\n\n def set_config(self, config):\n if isinstance(config, CrypTenConfig):\n self.config = config.config\n else:\n self.config = config\n\n def __getattribute__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError:\n keys = name.split('.')\n result = getattr(self.config, keys[0])\n for key in keys[1:]:\n result = getattr(result, key)\n return result\n\n def __getitem__(self, name):\n return self.__getattribute__(name)\n\n def __setattr__(self, name, value):\n if name == 'config':\n object.__setattr__(self, name, value)\n try:\n object.__getattribute__(self, name)\n object.__setattr__(self, name, value)\n except AttributeError:\n dotlist = [f'{name}={value}']\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n\n def __setitem__(self, name, value):\n self.__setattr__(name, value)\n\n @contextmanager\n def temp_override(self, override_dict):\n old_config = self.config\n try:\n dotlist = [f'{k}={v}' for k, v in override_dict.items()]\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n yield\n finally:\n self.config = old_config\n",
"step-5": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom contextlib import contextmanager\n\nimport yaml\nfrom omegaconf import OmegaConf\n\n\nclass CrypTenConfig:\n \"\"\"\n Configuration object used to store configurable parameters for CrypTen.\n\n This object acts as a nested dictionary, but can be queried using dot-notation(\n e.g. querying or setting `cfg.a.b` is equivalent to `cfg['a']['b']`).\n\n Users can load a CrypTen config from a file using `cfg.load_config(filepath)`.\n\n Users can temporarily override a config parameter using the contextmanager temp_override:\n\n .. code-block:: python\n\n cfg.a.b = outer # sets cfg[\"a\"][\"b\"] to outer value\n\n with cfg.temp_override(\"a.b\", inner):\n print(cfg.a.b) # prints inner value\n\n print(cfg.a.b) # prints outer value\n \"\"\"\n\n __DEFAULT_CONFIG_PATH = os.path.normpath(\n os.path.join(__file__, \"../../../configs/default.yaml\")\n )\n\n def __init__(self, config_file=None):\n self.load_config(config_file)\n\n def load_config(self, config_file):\n \"\"\"Loads config from a yaml file\"\"\"\n if config_file is None:\n config_file = CrypTenConfig.__DEFAULT_CONFIG_PATH\n\n # Use yaml to open stream for safe load\n with open(config_file) as stream:\n config_dict = yaml.safe_load(stream)\n self.config = OmegaConf.create(config_dict)\n\n def set_config(self, config):\n if isinstance(config, CrypTenConfig):\n self.config = config.config\n else:\n self.config = config\n\n def __getattribute__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError:\n keys = name.split(\".\")\n result = getattr(self.config, keys[0])\n for key in keys[1:]:\n result = getattr(result, key)\n return result\n\n def __getitem__(self, name):\n return self.__getattribute__(name)\n\n def __setattr__(self, name, value):\n if name == \"config\":\n object.__setattr__(self, name, value)\n try:\n # Can only set attribute if already exists\n object.__getattribute__(self, name)\n object.__setattr__(self, name, value)\n except AttributeError:\n dotlist = [f\"{name}={value}\"]\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n\n def __setitem__(self, name, value):\n self.__setattr__(name, value)\n\n @contextmanager\n def temp_override(self, override_dict):\n old_config = self.config\n try:\n dotlist = [f\"{k}={v}\" for k, v in override_dict.items()]\n update = OmegaConf.from_dotlist(dotlist)\n self.config = OmegaConf.merge(self.config, update)\n yield\n finally:\n self.config = old_config\n",
"step-ids": [
2,
7,
10,
12,
13
]
}
|
[
2,
7,
10,
12,
13
] |