# NOTE(review): the lines previously here ("code stringlengths ...",
# "order_type", "original_example", "step_ids", "---|---|---|---|") were a
# dataset/table-header extraction artifact accidentally prepended to this
# file.  They are not Python and would make the module unimportable; they
# have been neutralized as this comment and should simply be removed.
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Bootstrap the compiled extension module (_SoapySDR).  On Python >= 2.6 the
# shared library sitting next to this file is located and loaded via 'imp' so
# the bundled extension wins over any same-named module elsewhere on the path;
# on older interpreters a plain import is used.
# NOTE(review): 'imp' is deprecated on Python 3 -- this is SWIG 2.0.12
# generated code (see header) and is intentionally left as emitted.
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SoapySDR', [dirname(__file__)])
        except ImportError:
            # Not found next to this file: fall back to the normal import
            # mechanism (e.g. an installed or builtin _SoapySDR).
            import _SoapySDR
            return _SoapySDR
        if fp is not None:
            try:
                _mod = imp.load_module('_SoapySDR', fp, pathname, description)
            finally:
                # Always release the file handle opened by find_module.
                fp.close()
            return _mod
    _SoapySDR = swig_import_helper()
    del swig_import_helper
else:
    import _SoapySDR
del version_info
# Alias the builtin 'property' for use by the generated proxy classes below;
# the guard only matters on interpreters too old to have it.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic attribute setter: unknown names are stored on the instance."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Base class for the generated proxies: 'object' (new-style classes) when
# available, otherwise a classic-class stand-in.  _newclass gates the
# _swig_property definitions in the classes below.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
class SwigPyIterator(_object):
    """Abstract C++-style iterator over the SWIG-wrapped containers below.

    Instances are produced internally (e.g. by a container's iterator());
    direct construction always raises.  Supports both the C++ iterator
    protocol (incr/decr/advance/distance) and Python iteration (__next__).
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    # Destruction is handled by the C extension; __del__ is a no-op stub.
    __swig_destroy__ = _SoapySDR.delete_SwigPyIterator
    __del__ = lambda self : None;
    def value(self): return _SoapySDR.SwigPyIterator_value(self)
    def incr(self, n=1): return _SoapySDR.SwigPyIterator_incr(self, n)
    def decr(self, n=1): return _SoapySDR.SwigPyIterator_decr(self, n)
    def distance(self, *args): return _SoapySDR.SwigPyIterator_distance(self, *args)
    def equal(self, *args): return _SoapySDR.SwigPyIterator_equal(self, *args)
    def copy(self): return _SoapySDR.SwigPyIterator_copy(self)
    def next(self): return _SoapySDR.SwigPyIterator_next(self)
    def __next__(self): return _SoapySDR.SwigPyIterator___next__(self)
    def previous(self): return _SoapySDR.SwigPyIterator_previous(self)
    def advance(self, *args): return _SoapySDR.SwigPyIterator_advance(self, *args)
    def __eq__(self, *args): return _SoapySDR.SwigPyIterator___eq__(self, *args)
    def __ne__(self, *args): return _SoapySDR.SwigPyIterator___ne__(self, *args)
    def __iadd__(self, *args): return _SoapySDR.SwigPyIterator___iadd__(self, *args)
    def __isub__(self, *args): return _SoapySDR.SwigPyIterator___isub__(self, *args)
    def __add__(self, *args): return _SoapySDR.SwigPyIterator___add__(self, *args)
    def __sub__(self, *args): return _SoapySDR.SwigPyIterator___sub__(self, *args)
    def __iter__(self): return self
# Register the proxy class with the C extension's type system.
SwigPyIterator_swigregister = _SoapySDR.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
# Kwargs <-> string conversion helpers.  SWIG emits a Python shim and then
# immediately rebinds the name to the raw C function, so the defs below are
# shadowed; the rebinding is what callers actually get.
def KwargsFromString(*args):
    return _SoapySDR.KwargsFromString(*args)
KwargsFromString = _SoapySDR.KwargsFromString
def KwargsToString(*args):
    return _SoapySDR.KwargsToString(*args)
KwargsToString = _SoapySDR.KwargsToString
class Range(_object):
    """Proxy for a SoapySDR range: minimum, maximum and optional step."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Range, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Store the C++ object handle; append when SWIG pre-created 'this',
        # otherwise assign it directly (standard generated pattern).
        this = _SoapySDR.new_Range(*args)
        try: self.this.append(this)
        except: self.this = this
    def minimum(self): return _SoapySDR.Range_minimum(self)
    def maximum(self): return _SoapySDR.Range_maximum(self)
    def step(self): return _SoapySDR.Range_step(self)
    def __str__(self):
        # "min, max" with the step appended only when it is non-zero,
        # each rendered with %g.
        fields = [self.minimum(), self.maximum()]
        if self.step() != 0.0: fields.append(self.step())
        return ', '.join(['%g'%f for f in fields])
    __swig_destroy__ = _SoapySDR.delete_Range
    __del__ = lambda self : None;
Range_swigregister = _SoapySDR.Range_swigregister
Range_swigregister(Range)
class ArgInfo(_object):
    """Proxy describing one device/stream argument.

    Exposes the fields key, value, name, description, units, type (one of
    BOOL/INT/FLOAT/STRING), range, options and optionNames as properties
    backed by the C getters/setters.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _SoapySDR.new_ArgInfo()
        try: self.this.append(this)
        except: self.this = this
    # Each field registers its C setter/getter and, on new-style classes,
    # a matching property of the same name.
    __swig_setmethods__["key"] = _SoapySDR.ArgInfo_key_set
    __swig_getmethods__["key"] = _SoapySDR.ArgInfo_key_get
    if _newclass:key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.ArgInfo_key_set)
    __swig_setmethods__["value"] = _SoapySDR.ArgInfo_value_set
    __swig_getmethods__["value"] = _SoapySDR.ArgInfo_value_get
    if _newclass:value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.ArgInfo_value_set)
    __swig_setmethods__["name"] = _SoapySDR.ArgInfo_name_set
    __swig_getmethods__["name"] = _SoapySDR.ArgInfo_name_get
    if _newclass:name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.ArgInfo_name_set)
    __swig_setmethods__["description"] = _SoapySDR.ArgInfo_description_set
    __swig_getmethods__["description"] = _SoapySDR.ArgInfo_description_get
    if _newclass:description = _swig_property(_SoapySDR.ArgInfo_description_get, _SoapySDR.ArgInfo_description_set)
    __swig_setmethods__["units"] = _SoapySDR.ArgInfo_units_set
    __swig_getmethods__["units"] = _SoapySDR.ArgInfo_units_get
    if _newclass:units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.ArgInfo_units_set)
    # Enumeration values for the 'type' field.
    BOOL = _SoapySDR.ArgInfo_BOOL
    INT = _SoapySDR.ArgInfo_INT
    FLOAT = _SoapySDR.ArgInfo_FLOAT
    STRING = _SoapySDR.ArgInfo_STRING
    __swig_setmethods__["type"] = _SoapySDR.ArgInfo_type_set
    __swig_getmethods__["type"] = _SoapySDR.ArgInfo_type_get
    if _newclass:type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.ArgInfo_type_set)
    __swig_setmethods__["range"] = _SoapySDR.ArgInfo_range_set
    __swig_getmethods__["range"] = _SoapySDR.ArgInfo_range_get
    if _newclass:range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.ArgInfo_range_set)
    __swig_setmethods__["options"] = _SoapySDR.ArgInfo_options_set
    __swig_getmethods__["options"] = _SoapySDR.ArgInfo_options_get
    if _newclass:options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.ArgInfo_options_set)
    __swig_setmethods__["optionNames"] = _SoapySDR.ArgInfo_optionNames_set
    __swig_getmethods__["optionNames"] = _SoapySDR.ArgInfo_optionNames_get
    if _newclass:optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get, _SoapySDR.ArgInfo_optionNames_set)
    __swig_destroy__ = _SoapySDR.delete_ArgInfo
    __del__ = lambda self : None;
ArgInfo_swigregister = _SoapySDR.ArgInfo_swigregister
ArgInfo_swigregister(ArgInfo)
class SoapySDRKwargs(_object):
    """Dict-like proxy over the wrapped C++ key/value arguments container.

    Supports the mapping protocol (getitem/setitem/delitem/contains/len)
    plus the C++-style accessors exposed by the extension.  Use asdict()
    to obtain a plain Python dict.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargs, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRKwargs_iterator(self)
    # NOTE(review): __iter__ is defined twice in the generated code; this
    # first definition is shadowed by the key_iterator-based one below.
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRKwargs___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRKwargs___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRKwargs___len__(self)
    def __iter__(self): return self.key_iterator()
    def iterkeys(self): return self.key_iterator()
    def itervalues(self): return self.value_iterator()
    def iteritems(self): return self.iterator()
    def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)
    def has_key(self, *args): return _SoapySDR.SoapySDRKwargs_has_key(self, *args)
    def keys(self): return _SoapySDR.SoapySDRKwargs_keys(self)
    def values(self): return _SoapySDR.SoapySDRKwargs_values(self)
    def items(self): return _SoapySDR.SoapySDRKwargs_items(self)
    def __contains__(self, *args): return _SoapySDR.SoapySDRKwargs___contains__(self, *args)
    def key_iterator(self): return _SoapySDR.SoapySDRKwargs_key_iterator(self)
    def value_iterator(self): return _SoapySDR.SoapySDRKwargs_value_iterator(self)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)
    def asdict(self): return _SoapySDR.SoapySDRKwargs_asdict(self)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRKwargs(*args)
        try: self.this.append(this)
        except: self.this = this
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRKwargs_empty(self)
    def size(self): return _SoapySDR.SoapySDRKwargs_size(self)
    def clear(self): return _SoapySDR.SoapySDRKwargs_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRKwargs_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRKwargs_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRKwargs_begin(self)
    def end(self): return _SoapySDR.SoapySDRKwargs_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRKwargs_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRKwargs_rend(self)
    def count(self, *args): return _SoapySDR.SoapySDRKwargs_count(self, *args)
    def erase(self, *args): return _SoapySDR.SoapySDRKwargs_erase(self, *args)
    def find(self, *args): return _SoapySDR.SoapySDRKwargs_find(self, *args)
    def lower_bound(self, *args): return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)
    def upper_bound(self, *args): return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)
    def __str__(self):
        # Render as "{k=v, k=v}" using the item iterator.
        out = list()
        for k, v in self.iteritems():
            out.append("%s=%s"%(k, v))
        return '{'+(', '.join(out))+'}'
    __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs
    __del__ = lambda self : None;
SoapySDRKwargs_swigregister = _SoapySDR.SoapySDRKwargs_swigregister
SoapySDRKwargs_swigregister(SoapySDRKwargs)
class SoapySDRKwargsList(_object):
    """List-like proxy over the wrapped C++ sequence of kwargs entries.

    Supports the Python sequence protocol alongside the C++ std-container
    style accessors exposed by the extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargsList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRKwargsList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRKwargsList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRKwargsList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRKwargsList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRKwargsList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRKwargsList_append(self, *args)
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRKwargsList_empty(self)
    def size(self): return _SoapySDR.SoapySDRKwargsList_size(self)
    def clear(self): return _SoapySDR.SoapySDRKwargsList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRKwargsList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRKwargsList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRKwargsList_begin(self)
    def end(self): return _SoapySDR.SoapySDRKwargsList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRKwargsList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRKwargsList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRKwargsList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRKwargsList_erase(self, *args)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRKwargsList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRKwargsList_front(self)
    def back(self): return _SoapySDR.SoapySDRKwargsList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRKwargsList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRKwargsList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRKwargsList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRKwargsList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList
    __del__ = lambda self : None;
SoapySDRKwargsList_swigregister = _SoapySDR.SoapySDRKwargsList_swigregister
SoapySDRKwargsList_swigregister(SoapySDRKwargsList)
class SoapySDRArgInfoList(_object):
    """List-like proxy over the wrapped C++ sequence of ArgInfo entries."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRArgInfoList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRArgInfoList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRArgInfoList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRArgInfoList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRArgInfoList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRArgInfoList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRArgInfoList_append(self, *args)
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRArgInfoList_empty(self)
    def size(self): return _SoapySDR.SoapySDRArgInfoList_size(self)
    def clear(self): return _SoapySDR.SoapySDRArgInfoList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRArgInfoList_begin(self)
    def end(self): return _SoapySDR.SoapySDRArgInfoList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRArgInfoList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRArgInfoList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRArgInfoList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRArgInfoList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRArgInfoList_front(self)
    def back(self): return _SoapySDR.SoapySDRArgInfoList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRArgInfoList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList
    __del__ = lambda self : None;
SoapySDRArgInfoList_swigregister = _SoapySDR.SoapySDRArgInfoList_swigregister
SoapySDRArgInfoList_swigregister(SoapySDRArgInfoList)
class SoapySDRStringList(_object):
    """List-like proxy over the wrapped C++ sequence of strings."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRStringList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRStringList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRStringList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRStringList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRStringList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRStringList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRStringList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRStringList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRStringList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRStringList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRStringList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRStringList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRStringList_append(self, *args)
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRStringList_empty(self)
    def size(self): return _SoapySDR.SoapySDRStringList_size(self)
    def clear(self): return _SoapySDR.SoapySDRStringList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRStringList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRStringList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRStringList_begin(self)
    def end(self): return _SoapySDR.SoapySDRStringList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRStringList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRStringList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRStringList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRStringList_erase(self, *args)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRStringList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRStringList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRStringList_front(self)
    def back(self): return _SoapySDR.SoapySDRStringList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRStringList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRStringList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRStringList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRStringList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRStringList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList
    __del__ = lambda self : None;
SoapySDRStringList_swigregister = _SoapySDR.SoapySDRStringList_swigregister
SoapySDRStringList_swigregister(SoapySDRStringList)
class SoapySDRRangeList(_object):
    """List-like proxy over the wrapped C++ sequence of Range entries."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRRangeList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRRangeList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRRangeList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRRangeList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRRangeList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRRangeList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRRangeList_append(self, *args)
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRRangeList_empty(self)
    def size(self): return _SoapySDR.SoapySDRRangeList_size(self)
    def clear(self): return _SoapySDR.SoapySDRRangeList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRRangeList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRRangeList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRRangeList_begin(self)
    def end(self): return _SoapySDR.SoapySDRRangeList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRRangeList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRRangeList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRRangeList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRRangeList_erase(self, *args)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRRangeList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRRangeList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRRangeList_front(self)
    def back(self): return _SoapySDR.SoapySDRRangeList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRRangeList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRRangeList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRRangeList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRRangeList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRRangeList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList
    __del__ = lambda self : None;
SoapySDRRangeList_swigregister = _SoapySDR.SoapySDRRangeList_swigregister
SoapySDRRangeList_swigregister(SoapySDRRangeList)
class SoapySDRSizeList(_object):
    """List-like proxy over the wrapped C++ sequence of size values."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRSizeList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRSizeList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRSizeList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRSizeList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRSizeList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRSizeList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRSizeList_append(self, *args)
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRSizeList_empty(self)
    def size(self): return _SoapySDR.SoapySDRSizeList_size(self)
    def clear(self): return _SoapySDR.SoapySDRSizeList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRSizeList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRSizeList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRSizeList_begin(self)
    def end(self): return _SoapySDR.SoapySDRSizeList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRSizeList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRSizeList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRSizeList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRSizeList_erase(self, *args)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRSizeList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRSizeList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRSizeList_front(self)
    def back(self): return _SoapySDR.SoapySDRSizeList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRSizeList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRSizeList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRSizeList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRSizeList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRSizeList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList
    __del__ = lambda self : None;
SoapySDRSizeList_swigregister = _SoapySDR.SoapySDRSizeList_swigregister
SoapySDRSizeList_swigregister(SoapySDRSizeList)
class SoapySDRDoubleList(_object):
    """List-like proxy over the wrapped C++ sequence of double values."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRDoubleList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRDoubleList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRDoubleList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRDoubleList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRDoubleList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRDoubleList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRDoubleList_append(self, *args)
    # C++ std-container-style interface:
    def empty(self): return _SoapySDR.SoapySDRDoubleList_empty(self)
    def size(self): return _SoapySDR.SoapySDRDoubleList_size(self)
    def clear(self): return _SoapySDR.SoapySDRDoubleList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRDoubleList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRDoubleList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRDoubleList_begin(self)
    def end(self): return _SoapySDR.SoapySDRDoubleList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRDoubleList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRDoubleList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRDoubleList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRDoubleList_erase(self, *args)
    def __init__(self, *args):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_SoapySDRDoubleList(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRDoubleList_front(self)
    def back(self): return _SoapySDR.SoapySDRDoubleList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRDoubleList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRDoubleList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRDoubleList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRDoubleList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList
    __del__ = lambda self : None;
SoapySDRDoubleList_swigregister = _SoapySDR.SoapySDRDoubleList_swigregister
SoapySDRDoubleList_swigregister(SoapySDRDoubleList)
class StreamResult(_object):
    """Proxy holding the outcome of a stream read/write call.

    Fields (as properties backed by the C getters/setters):
    ret -- element count or error code; flags -- stream flag bits;
    timeNs -- buffer timestamp in nanoseconds; chanMask -- channel mask.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, StreamResult, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)
    __repr__ = _swig_repr
    def __init__(self):
        # Store the C++ object handle (standard generated pattern).
        this = _SoapySDR.new_StreamResult()
        try: self.this.append(this)
        except: self.this = this
    __swig_setmethods__["ret"] = _SoapySDR.StreamResult_ret_set
    __swig_getmethods__["ret"] = _SoapySDR.StreamResult_ret_get
    if _newclass:ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.StreamResult_ret_set)
    __swig_setmethods__["flags"] = _SoapySDR.StreamResult_flags_set
    __swig_getmethods__["flags"] = _SoapySDR.StreamResult_flags_get
    if _newclass:flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.StreamResult_flags_set)
    __swig_setmethods__["timeNs"] = _SoapySDR.StreamResult_timeNs_set
    __swig_getmethods__["timeNs"] = _SoapySDR.StreamResult_timeNs_get
    if _newclass:timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get, _SoapySDR.StreamResult_timeNs_set)
    __swig_setmethods__["chanMask"] = _SoapySDR.StreamResult_chanMask_set
    __swig_getmethods__["chanMask"] = _SoapySDR.StreamResult_chanMask_get
    if _newclass:chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get, _SoapySDR.StreamResult_chanMask_set)
    def __str__(self):
        # chanMask is intentionally omitted from the string form.
        return "ret=%s, flags=%s, timeNs=%s"%(self.ret, self.flags, self.timeNs)
    __swig_destroy__ = _SoapySDR.delete_StreamResult
    __del__ = lambda self : None;
StreamResult_swigregister = _SoapySDR.StreamResult_swigregister
StreamResult_swigregister(StreamResult)
# Direction constants (TX/RX) re-exported from the C extension.
SOAPY_SDR_TX = _SoapySDR.SOAPY_SDR_TX
SOAPY_SDR_RX = _SoapySDR.SOAPY_SDR_RX
# Stream flag bits re-exported from the C extension.
SOAPY_SDR_END_BURST = _SoapySDR.SOAPY_SDR_END_BURST
SOAPY_SDR_HAS_TIME = _SoapySDR.SOAPY_SDR_HAS_TIME
SOAPY_SDR_END_ABRUPT = _SoapySDR.SOAPY_SDR_END_ABRUPT
SOAPY_SDR_ONE_PACKET = _SoapySDR.SOAPY_SDR_ONE_PACKET
SOAPY_SDR_MORE_FRAGMENTS = _SoapySDR.SOAPY_SDR_MORE_FRAGMENTS
SOAPY_SDR_WAIT_TRIGGER = _SoapySDR.SOAPY_SDR_WAIT_TRIGGER
# Convert an error code to its string name (shim is rebound to the C function).
def SoapySDR_errToStr(*args):
    return _SoapySDR.SoapySDR_errToStr(*args)
SoapySDR_errToStr = _SoapySDR.SoapySDR_errToStr
# Error-code constants re-exported from the C extension.
SOAPY_SDR_TIMEOUT = _SoapySDR.SOAPY_SDR_TIMEOUT
SOAPY_SDR_STREAM_ERROR = _SoapySDR.SOAPY_SDR_STREAM_ERROR
SOAPY_SDR_CORRUPTION = _SoapySDR.SOAPY_SDR_CORRUPTION
SOAPY_SDR_OVERFLOW = _SoapySDR.SOAPY_SDR_OVERFLOW
SOAPY_SDR_NOT_SUPPORTED = _SoapySDR.SOAPY_SDR_NOT_SUPPORTED
SOAPY_SDR_TIME_ERROR = _SoapySDR.SOAPY_SDR_TIME_ERROR
SOAPY_SDR_UNDERFLOW = _SoapySDR.SOAPY_SDR_UNDERFLOW
# API/ABI version constants and getters.  Each def below is a generated shim
# that is immediately rebound to the raw C function of the same name.
SOAPY_SDR_API_VERSION = _SoapySDR.SOAPY_SDR_API_VERSION
SOAPY_SDR_ABI_VERSION = _SoapySDR.SOAPY_SDR_ABI_VERSION
def SoapySDR_getAPIVersion():
    return _SoapySDR.SoapySDR_getAPIVersion()
SoapySDR_getAPIVersion = _SoapySDR.SoapySDR_getAPIVersion
def SoapySDR_getABIVersion():
    return _SoapySDR.SoapySDR_getABIVersion()
SoapySDR_getABIVersion = _SoapySDR.SoapySDR_getABIVersion
def SoapySDR_getLibVersion():
    return _SoapySDR.SoapySDR_getLibVersion()
SoapySDR_getLibVersion = _SoapySDR.SoapySDR_getLibVersion
# Sample-format identifier constants (complex and real variants) re-exported
# from the C extension.
SOAPY_SDR_CF64 = _SoapySDR.SOAPY_SDR_CF64
SOAPY_SDR_CF32 = _SoapySDR.SOAPY_SDR_CF32
SOAPY_SDR_CS32 = _SoapySDR.SOAPY_SDR_CS32
SOAPY_SDR_CU32 = _SoapySDR.SOAPY_SDR_CU32
SOAPY_SDR_CS16 = _SoapySDR.SOAPY_SDR_CS16
SOAPY_SDR_CU16 = _SoapySDR.SOAPY_SDR_CU16
SOAPY_SDR_CS12 = _SoapySDR.SOAPY_SDR_CS12
SOAPY_SDR_CU12 = _SoapySDR.SOAPY_SDR_CU12
SOAPY_SDR_CS8 = _SoapySDR.SOAPY_SDR_CS8
SOAPY_SDR_CU8 = _SoapySDR.SOAPY_SDR_CU8
SOAPY_SDR_CS4 = _SoapySDR.SOAPY_SDR_CS4
SOAPY_SDR_CU4 = _SoapySDR.SOAPY_SDR_CU4
SOAPY_SDR_F64 = _SoapySDR.SOAPY_SDR_F64
SOAPY_SDR_F32 = _SoapySDR.SOAPY_SDR_F32
SOAPY_SDR_S32 = _SoapySDR.SOAPY_SDR_S32
SOAPY_SDR_U32 = _SoapySDR.SOAPY_SDR_U32
SOAPY_SDR_S16 = _SoapySDR.SOAPY_SDR_S16
SOAPY_SDR_U16 = _SoapySDR.SOAPY_SDR_U16
SOAPY_SDR_S8 = _SoapySDR.SOAPY_SDR_S8
SOAPY_SDR_U8 = _SoapySDR.SOAPY_SDR_U8
# Size in bytes of one sample of a given format string.
def SoapySDR_formatToSize(*args):
    return _SoapySDR.SoapySDR_formatToSize(*args)
SoapySDR_formatToSize = _SoapySDR.SoapySDR_formatToSize
# Log level constants (FATAL through TRACE, plus SSI) re-exported from
# the extension module.
SOAPY_SDR_FATAL = _SoapySDR.SOAPY_SDR_FATAL
SOAPY_SDR_CRITICAL = _SoapySDR.SOAPY_SDR_CRITICAL
SOAPY_SDR_ERROR = _SoapySDR.SOAPY_SDR_ERROR
SOAPY_SDR_WARNING = _SoapySDR.SOAPY_SDR_WARNING
SOAPY_SDR_NOTICE = _SoapySDR.SOAPY_SDR_NOTICE
SOAPY_SDR_INFO = _SoapySDR.SOAPY_SDR_INFO
SOAPY_SDR_DEBUG = _SoapySDR.SOAPY_SDR_DEBUG
SOAPY_SDR_TRACE = _SoapySDR.SOAPY_SDR_TRACE
SOAPY_SDR_SSI = _SoapySDR.SOAPY_SDR_SSI
# Logging wrappers (prefixed C-API names); each def is immediately rebound
# to the extension-module function.
def SoapySDR_log(*args):
  return _SoapySDR.SoapySDR_log(*args)
SoapySDR_log = _SoapySDR.SoapySDR_log
def SoapySDR_setLogLevel(*args):
  return _SoapySDR.SoapySDR_setLogLevel(*args)
SoapySDR_setLogLevel = _SoapySDR.SoapySDR_setLogLevel
# Unprefixed wrappers for the SoapySDR C++ namespace API.  SWIG emits each
# ``def`` and then immediately rebinds the name to the extension-module
# function, so the final module attributes are the C entry points; the
# statement order here is what makes that shadowing work.
def errToStr(*args):
  return _SoapySDR.errToStr(*args)
errToStr = _SoapySDR.errToStr
def getAPIVersion():
  return _SoapySDR.getAPIVersion()
getAPIVersion = _SoapySDR.getAPIVersion
def getABIVersion():
  return _SoapySDR.getABIVersion()
getABIVersion = _SoapySDR.getABIVersion
def getLibVersion():
  return _SoapySDR.getLibVersion()
getLibVersion = _SoapySDR.getLibVersion
# Module (plugin) discovery and loading wrappers.
def getRootPath():
  return _SoapySDR.getRootPath()
getRootPath = _SoapySDR.getRootPath
def listSearchPaths():
  return _SoapySDR.listSearchPaths()
listSearchPaths = _SoapySDR.listSearchPaths
def listModules(*args):
  return _SoapySDR.listModules(*args)
listModules = _SoapySDR.listModules
def loadModule(*args):
  return _SoapySDR.loadModule(*args)
loadModule = _SoapySDR.loadModule
def getLoaderResult(*args):
  return _SoapySDR.getLoaderResult(*args)
getLoaderResult = _SoapySDR.getLoaderResult
def unloadModule(*args):
  return _SoapySDR.unloadModule(*args)
unloadModule = _SoapySDR.unloadModule
def loadModules():
  return _SoapySDR.loadModules()
loadModules = _SoapySDR.loadModules
# Format, time-conversion, and logging wrappers.
def formatToSize(*args):
  return _SoapySDR.formatToSize(*args)
formatToSize = _SoapySDR.formatToSize
def ticksToTimeNs(*args):
  return _SoapySDR.ticksToTimeNs(*args)
ticksToTimeNs = _SoapySDR.ticksToTimeNs
def timeNsToTicks(*args):
  return _SoapySDR.timeNsToTicks(*args)
timeNsToTicks = _SoapySDR.timeNsToTicks
def log(*args):
  return _SoapySDR.log(*args)
log = _SoapySDR.log
def setLogLevel(*args):
  return _SoapySDR.setLogLevel(*args)
setLogLevel = _SoapySDR.setLogLevel
class Device(_object):
    """SWIG proxy for the C++ SoapySDR Device class.

    Instances are not constructed directly (``__init__`` raises
    AttributeError); use the static ``make()``/``unmake()`` factory calls.
    Every method forwards to its ``Device_*`` counterpart in the
    ``_SoapySDR`` extension module.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Device, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Device, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _SoapySDR.delete_Device
    # Placeholder deleter; replaced by the unmake-calling __del__ further down.
    __del__ = lambda self : None;
    # Static factory calls, registered via the getmethods table for
    # old-style classes and as staticmethods when new-style classes exist.
    __swig_getmethods__["enumerate"] = lambda x: _SoapySDR.Device_enumerate
    if _newclass:enumerate = staticmethod(_SoapySDR.Device_enumerate)
    __swig_getmethods__["make"] = lambda x: _SoapySDR.Device_make
    if _newclass:make = staticmethod(_SoapySDR.Device_make)
    __swig_getmethods__["unmake"] = lambda x: _SoapySDR.Device_unmake
    if _newclass:unmake = staticmethod(_SoapySDR.Device_unmake)
    # -- Identification API --
    def getDriverKey(self): return _SoapySDR.Device_getDriverKey(self)
    def getHardwareKey(self): return _SoapySDR.Device_getHardwareKey(self)
    def getHardwareInfo(self): return _SoapySDR.Device_getHardwareInfo(self)
    # -- Channels API --
    def setFrontendMapping(self, *args): return _SoapySDR.Device_setFrontendMapping(self, *args)
    def getFrontendMapping(self, *args): return _SoapySDR.Device_getFrontendMapping(self, *args)
    def getNumChannels(self, *args): return _SoapySDR.Device_getNumChannels(self, *args)
    def getChannelInfo(self, *args): return _SoapySDR.Device_getChannelInfo(self, *args)
    def getFullDuplex(self, *args): return _SoapySDR.Device_getFullDuplex(self, *args)
    # -- Stream API (raw SWIG versions; readStream/writeStream/
    #    readStreamStatus are overridden by Pythonic wrappers below) --
    def getStreamFormats(self, *args): return _SoapySDR.Device_getStreamFormats(self, *args)
    def getNativeStreamFormat(self, *args): return _SoapySDR.Device_getNativeStreamFormat(self, *args)
    def getStreamArgsInfo(self, *args): return _SoapySDR.Device_getStreamArgsInfo(self, *args)
    def setupStream(self, *args): return _SoapySDR.Device_setupStream(self, *args)
    def closeStream(self, *args): return _SoapySDR.Device_closeStream(self, *args)
    def getStreamMTU(self, *args): return _SoapySDR.Device_getStreamMTU(self, *args)
    def activateStream(self, *args): return _SoapySDR.Device_activateStream(self, *args)
    def deactivateStream(self, *args): return _SoapySDR.Device_deactivateStream(self, *args)
    def readStream(self, *args): return _SoapySDR.Device_readStream(self, *args)
    def writeStream(self, *args): return _SoapySDR.Device_writeStream(self, *args)
    def readStreamStatus(self, *args): return _SoapySDR.Device_readStreamStatus(self, *args)
    # -- Direct buffer access API --
    def getNumDirectAccessBuffers(self, *args): return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)
    def getDirectAccessBufferAddrs(self, *args): return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)
    def acquireReadBuffer(self, *args): return _SoapySDR.Device_acquireReadBuffer(self, *args)
    def releaseReadBuffer(self, *args): return _SoapySDR.Device_releaseReadBuffer(self, *args)
    def acquireWriteBuffer(self, *args): return _SoapySDR.Device_acquireWriteBuffer(self, *args)
    def releaseWriteBuffer(self, *args): return _SoapySDR.Device_releaseWriteBuffer(self, *args)
    # -- Antenna API --
    def listAntennas(self, *args): return _SoapySDR.Device_listAntennas(self, *args)
    def setAntenna(self, *args): return _SoapySDR.Device_setAntenna(self, *args)
    def getAntenna(self, *args): return _SoapySDR.Device_getAntenna(self, *args)
    # -- Frontend corrections API --
    def hasDCOffsetMode(self, *args): return _SoapySDR.Device_hasDCOffsetMode(self, *args)
    def setDCOffsetMode(self, *args): return _SoapySDR.Device_setDCOffsetMode(self, *args)
    def getDCOffsetMode(self, *args): return _SoapySDR.Device_getDCOffsetMode(self, *args)
    def hasDCOffset(self, *args): return _SoapySDR.Device_hasDCOffset(self, *args)
    def setDCOffset(self, *args): return _SoapySDR.Device_setDCOffset(self, *args)
    def getDCOffset(self, *args): return _SoapySDR.Device_getDCOffset(self, *args)
    def hasIQBalance(self, *args): return _SoapySDR.Device_hasIQBalance(self, *args)
    def setIQBalance(self, *args): return _SoapySDR.Device_setIQBalance(self, *args)
    def getIQBalance(self, *args): return _SoapySDR.Device_getIQBalance(self, *args)
    def hasFrequencyCorrection(self, *args): return _SoapySDR.Device_hasFrequencyCorrection(self, *args)
    def setFrequencyCorrection(self, *args): return _SoapySDR.Device_setFrequencyCorrection(self, *args)
    def getFrequencyCorrection(self, *args): return _SoapySDR.Device_getFrequencyCorrection(self, *args)
    # -- Gain API --
    def listGains(self, *args): return _SoapySDR.Device_listGains(self, *args)
    def hasGainMode(self, *args): return _SoapySDR.Device_hasGainMode(self, *args)
    def setGainMode(self, *args): return _SoapySDR.Device_setGainMode(self, *args)
    def getGainMode(self, *args): return _SoapySDR.Device_getGainMode(self, *args)
    def setGain(self, *args): return _SoapySDR.Device_setGain(self, *args)
    def getGain(self, *args): return _SoapySDR.Device_getGain(self, *args)
    def getGainRange(self, *args): return _SoapySDR.Device_getGainRange(self, *args)
    # -- Frequency API --
    def setFrequency(self, *args): return _SoapySDR.Device_setFrequency(self, *args)
    def getFrequency(self, *args): return _SoapySDR.Device_getFrequency(self, *args)
    def listFrequencies(self, *args): return _SoapySDR.Device_listFrequencies(self, *args)
    def getFrequencyRange(self, *args): return _SoapySDR.Device_getFrequencyRange(self, *args)
    def getFrequencyArgsInfo(self, *args): return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)
    # -- Sample rate API --
    def setSampleRate(self, *args): return _SoapySDR.Device_setSampleRate(self, *args)
    def getSampleRate(self, *args): return _SoapySDR.Device_getSampleRate(self, *args)
    def listSampleRates(self, *args): return _SoapySDR.Device_listSampleRates(self, *args)
    def getSampleRateRange(self, *args): return _SoapySDR.Device_getSampleRateRange(self, *args)
    # -- Bandwidth API --
    def setBandwidth(self, *args): return _SoapySDR.Device_setBandwidth(self, *args)
    def getBandwidth(self, *args): return _SoapySDR.Device_getBandwidth(self, *args)
    def listBandwidths(self, *args): return _SoapySDR.Device_listBandwidths(self, *args)
    def getBandwidthRange(self, *args): return _SoapySDR.Device_getBandwidthRange(self, *args)
    # -- Clocking API --
    def setMasterClockRate(self, *args): return _SoapySDR.Device_setMasterClockRate(self, *args)
    def getMasterClockRate(self): return _SoapySDR.Device_getMasterClockRate(self)
    def getMasterClockRates(self): return _SoapySDR.Device_getMasterClockRates(self)
    def listClockSources(self): return _SoapySDR.Device_listClockSources(self)
    def setClockSource(self, *args): return _SoapySDR.Device_setClockSource(self, *args)
    def getClockSource(self): return _SoapySDR.Device_getClockSource(self)
    # -- Time API --
    def listTimeSources(self): return _SoapySDR.Device_listTimeSources(self)
    def setTimeSource(self, *args): return _SoapySDR.Device_setTimeSource(self, *args)
    def getTimeSource(self): return _SoapySDR.Device_getTimeSource(self)
    def hasHardwareTime(self, what=""): return _SoapySDR.Device_hasHardwareTime(self, what)
    def getHardwareTime(self, what=""): return _SoapySDR.Device_getHardwareTime(self, what)
    def setHardwareTime(self, *args): return _SoapySDR.Device_setHardwareTime(self, *args)
    def setCommandTime(self, *args): return _SoapySDR.Device_setCommandTime(self, *args)
    # -- Sensor API --
    def listSensors(self, *args): return _SoapySDR.Device_listSensors(self, *args)
    def getSensorInfo(self, *args): return _SoapySDR.Device_getSensorInfo(self, *args)
    def readSensor(self, *args): return _SoapySDR.Device_readSensor(self, *args)
    # -- Register API --
    def listRegisterInterfaces(self): return _SoapySDR.Device_listRegisterInterfaces(self)
    def writeRegister(self, *args): return _SoapySDR.Device_writeRegister(self, *args)
    def readRegister(self, *args): return _SoapySDR.Device_readRegister(self, *args)
    def writeRegisters(self, *args): return _SoapySDR.Device_writeRegisters(self, *args)
    def readRegisters(self, *args): return _SoapySDR.Device_readRegisters(self, *args)
    # -- Settings API --
    def getSettingInfo(self, *args): return _SoapySDR.Device_getSettingInfo(self, *args)
    def writeSetting(self, *args): return _SoapySDR.Device_writeSetting(self, *args)
    def readSetting(self, *args): return _SoapySDR.Device_readSetting(self, *args)
    # -- GPIO API --
    def listGPIOBanks(self): return _SoapySDR.Device_listGPIOBanks(self)
    def writeGPIO(self, *args): return _SoapySDR.Device_writeGPIO(self, *args)
    def readGPIO(self, *args): return _SoapySDR.Device_readGPIO(self, *args)
    def writeGPIODir(self, *args): return _SoapySDR.Device_writeGPIODir(self, *args)
    def readGPIODir(self, *args): return _SoapySDR.Device_readGPIODir(self, *args)
    # -- I2C / SPI / UART API --
    def writeI2C(self, *args): return _SoapySDR.Device_writeI2C(self, *args)
    def readI2C(self, *args): return _SoapySDR.Device_readI2C(self, *args)
    def transactSPI(self, *args): return _SoapySDR.Device_transactSPI(self, *args)
    def listUARTs(self): return _SoapySDR.Device_listUARTs(self)
    def writeUART(self, *args): return _SoapySDR.Device_writeUART(self, *args)
    def readUART(self, *args): return _SoapySDR.Device_readUART(self, *args)
    # Low-level shims consumed by the Pythonic stream wrappers below.
    def readStream__(self, *args): return _SoapySDR.Device_readStream__(self, *args)
    def writeStream__(self, *args): return _SoapySDR.Device_writeStream__(self, *args)
    def readStreamStatus__(self, *args): return _SoapySDR.Device_readStreamStatus__(self, *args)
    # Custom deleter: release the underlying C++ device via the static
    # unmake() factory (replaces the placeholder __del__ lambda above).
    def __del__(self):
        Device.unmake(self)
    def __str__(self):
        return "%s:%s"%(self.getDriverKey(), self.getHardwareKey())
    # Pythonic stream overrides: accept buffer objects (e.g. numpy arrays)
    # and convert each to a raw pointer via extractBuffPointer() before
    # delegating to the double-underscore shims.
    def readStream(self, stream, buffs, numElems, flags = 0, timeoutUs = 100000):
        ptrs = [extractBuffPointer(b) for b in buffs]
        return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)
    def writeStream(self, stream, buffs, numElems, flags = 0, timeNs = 0, timeoutUs = 100000):
        ptrs = [extractBuffPointer(b) for b in buffs]
        return self.writeStream__(stream, ptrs, numElems, flags, timeNs, timeoutUs)
    def readStreamStatus(self, stream, timeoutUs = 100000):
        return self.readStreamStatus__(stream, timeoutUs)
# Register the proxy class with the SWIG runtime.
Device_swigregister = _SoapySDR.Device_swigregister
Device_swigregister(Device)
# Module-level aliases for the static Device factory calls; each
# SWIG-generated def is immediately rebound to the C entry point.
def Device_enumerate(*args):
  return _SoapySDR.Device_enumerate(*args)
Device_enumerate = _SoapySDR.Device_enumerate
def Device_make(*args):
  return _SoapySDR.Device_make(*args)
Device_make = _SoapySDR.Device_make
def Device_unmake(*args):
  return _SoapySDR.Device_unmake(*args)
Device_unmake = _SoapySDR.Device_unmake
# Export every SOAPY_SDR_* constant defined above for ``from SoapySDR import *``.
__all__ = [_name for _name in sorted(globals()) if _name.startswith('SOAPY_SDR_')]
# Keep a reference to the SWIG proxy, then shadow ``Device`` with a
# subclass whose __new__ forwards to the static make() factory -- this
# lets callers write ``Device(args)`` even though the proxy's __init__
# raises AttributeError.  The definition order here is load-bearing.
_Device = Device
class Device(Device):
    def __new__(cls, *args, **kwargs):
        return cls.make(*args, **kwargs)
def extractBuffPointer(buff):
    """Return the raw memory address of *buff* as an integer.

    Supports objects exposing the numpy ``__array_interface__`` protocol
    (the address is taken from its ``data`` field) and any object
    convertible to an integer address via ``__int__``/``__long__``
    (e.g. SWIG pointer wrappers or plain integers).

    Raises:
        Exception: if *buff* exposes none of the supported protocols.
    """
    # Preferred path: numpy arrays and similar objects carry the buffer
    # address directly in the array-interface dictionary.
    if hasattr(buff, '__array_interface__'):
        return buff.__array_interface__['data'][0]
    # Check __int__ before __long__: the name ``long`` does not exist on
    # Python 3, so the original __long__-first order raised NameError for
    # any object defining both protocols.
    if hasattr(buff, '__int__'):
        return int(buff)
    if hasattr(buff, '__long__'):
        return long(buff)  # Python 2 only; reached solely via the hasattr guard
    raise Exception("Unrecognized data format: " + str(type(buff)))
# This file is compatible with both classic and new-style classes.
|
normal
|
{
"blob_id": "a6670d0d09f02b674bc31b770f42d4d8a01a4a4e",
"index": 9884,
"step-1": "<mask token>\n\n\nclass SoapySDRSizeList(_object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n <mask token>\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n <mask token>\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n <mask token>\n\n def pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def 
push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n <mask token>\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return 
_SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: 
_swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: 
_SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return _SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return _SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return 
_SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return 
_SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return 
_SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return _SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return _SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return 
_SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\nclass Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SoapySDRKwargsList(_object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargsList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n <mask token>\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargsList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargsList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRKwargsList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n <mask token>\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargsList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargsList_size(self)\n <mask token>\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargsList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargsList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargsList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n 
<mask token>\n\n def front(self):\n return _SoapySDR.SoapySDRKwargsList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRKwargsList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n <mask token>\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRKwargsList_capacity(self)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRArgInfoList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self,\n SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRArgInfoList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRArgInfoList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n\n def empty(self):\n 
return _SoapySDR.SoapySDRArgInfoList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRArgInfoList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRArgInfoList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRArgInfoList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRArgInfoList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRArgInfoList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRArgInfoList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRArgInfoList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList,\n 
name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRStringList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRStringList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRStringList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRStringList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRStringList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRStringList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRStringList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRStringList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRStringList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRStringList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRStringList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRStringList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRStringList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRStringList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRStringList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRStringList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRStringList_erase(self, *args)\n\n def __init__(self, *args):\n this = 
_SoapySDR.new_SoapySDRStringList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRStringList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRStringList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRStringList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRStringList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRStringList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRStringList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRRangeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRRangeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRRangeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRRangeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n\n def __getitem__(self, 
*args):\n return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRRangeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRRangeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRRangeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRRangeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRRangeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRRangeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRRangeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRRangeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRRangeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRRangeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRRangeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRSizeList(_object):\n 
__swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name\n )\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRSizeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRSizeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRSizeList_rend(self)\n\n def 
pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n 
return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def 
capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, 
name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: _SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return _SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return 
_SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def 
hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return _SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def 
getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return _SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return _SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return 
_SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return _SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return 
self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\nclass Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _swig_setattr(self, class_type, name, value):\n return _swig_setattr_nondynamic(self, class_type, name, value, 0)\n\n\ndef _swig_getattr(self, class_type, name):\n if name == 'thisown':\n return self.this.own()\n method = class_type.__swig_getmethods__.get(name, None)\n if method:\n return method(self)\n raise AttributeError(name)\n\n\ndef _swig_repr(self):\n try:\n strthis = 'proxy of ' + self.this.__repr__()\n except:\n strthis = ''\n return '<%s.%s; %s >' % (self.__class__.__module__, self.__class__.\n __name__, strthis)\n\n\n<mask token>\n\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined - class is abstract')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_SwigPyIterator\n __del__ = lambda self: None\n\n def value(self):\n return _SoapySDR.SwigPyIterator_value(self)\n\n def incr(self, n=1):\n return _SoapySDR.SwigPyIterator_incr(self, n)\n\n def decr(self, n=1):\n return _SoapySDR.SwigPyIterator_decr(self, n)\n\n def distance(self, *args):\n return _SoapySDR.SwigPyIterator_distance(self, *args)\n\n def equal(self, *args):\n return _SoapySDR.SwigPyIterator_equal(self, *args)\n\n def copy(self):\n return _SoapySDR.SwigPyIterator_copy(self)\n\n def next(self):\n return _SoapySDR.SwigPyIterator_next(self)\n\n def __next__(self):\n return _SoapySDR.SwigPyIterator___next__(self)\n\n def previous(self):\n return _SoapySDR.SwigPyIterator_previous(self)\n\n def advance(self, *args):\n return _SoapySDR.SwigPyIterator_advance(self, *args)\n\n def __eq__(self, *args):\n return _SoapySDR.SwigPyIterator___eq__(self, *args)\n\n def __ne__(self, *args):\n return _SoapySDR.SwigPyIterator___ne__(self, *args)\n\n def __iadd__(self, *args):\n 
return _SoapySDR.SwigPyIterator___iadd__(self, *args)\n\n def __isub__(self, *args):\n return _SoapySDR.SwigPyIterator___isub__(self, *args)\n\n def __add__(self, *args):\n return _SoapySDR.SwigPyIterator___add__(self, *args)\n\n def __sub__(self, *args):\n return _SoapySDR.SwigPyIterator___sub__(self, *args)\n\n def __iter__(self):\n return self\n\n\n<mask token>\n\n\ndef KwargsToString(*args):\n return _SoapySDR.KwargsToString(*args)\n\n\n<mask token>\n\n\nclass Range(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name,\n value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Range, name)\n __repr__ = _swig_repr\n\n def __init__(self, *args):\n this = _SoapySDR.new_Range(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def minimum(self):\n return _SoapySDR.Range_minimum(self)\n\n def maximum(self):\n return _SoapySDR.Range_maximum(self)\n\n def step(self):\n return _SoapySDR.Range_step(self)\n\n def __str__(self):\n fields = [self.minimum(), self.maximum()]\n if self.step() != 0.0:\n fields.append(self.step())\n return ', '.join([('%g' % f) for f in fields])\n __swig_destroy__ = _SoapySDR.delete_Range\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass ArgInfo(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_ArgInfo()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['key'] = _SoapySDR.ArgInfo_key_set\n __swig_getmethods__['key'] = _SoapySDR.ArgInfo_key_get\n if _newclass:\n key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.\n ArgInfo_key_set)\n __swig_setmethods__['value'] = _SoapySDR.ArgInfo_value_set\n __swig_getmethods__['value'] = _SoapySDR.ArgInfo_value_get\n 
if _newclass:\n value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.\n ArgInfo_value_set)\n __swig_setmethods__['name'] = _SoapySDR.ArgInfo_name_set\n __swig_getmethods__['name'] = _SoapySDR.ArgInfo_name_get\n if _newclass:\n name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.\n ArgInfo_name_set)\n __swig_setmethods__['description'] = _SoapySDR.ArgInfo_description_set\n __swig_getmethods__['description'] = _SoapySDR.ArgInfo_description_get\n if _newclass:\n description = _swig_property(_SoapySDR.ArgInfo_description_get,\n _SoapySDR.ArgInfo_description_set)\n __swig_setmethods__['units'] = _SoapySDR.ArgInfo_units_set\n __swig_getmethods__['units'] = _SoapySDR.ArgInfo_units_get\n if _newclass:\n units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.\n ArgInfo_units_set)\n BOOL = _SoapySDR.ArgInfo_BOOL\n INT = _SoapySDR.ArgInfo_INT\n FLOAT = _SoapySDR.ArgInfo_FLOAT\n STRING = _SoapySDR.ArgInfo_STRING\n __swig_setmethods__['type'] = _SoapySDR.ArgInfo_type_set\n __swig_getmethods__['type'] = _SoapySDR.ArgInfo_type_get\n if _newclass:\n type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.\n ArgInfo_type_set)\n __swig_setmethods__['range'] = _SoapySDR.ArgInfo_range_set\n __swig_getmethods__['range'] = _SoapySDR.ArgInfo_range_get\n if _newclass:\n range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.\n ArgInfo_range_set)\n __swig_setmethods__['options'] = _SoapySDR.ArgInfo_options_set\n __swig_getmethods__['options'] = _SoapySDR.ArgInfo_options_get\n if _newclass:\n options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.\n ArgInfo_options_set)\n __swig_setmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_set\n __swig_getmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_get\n if _newclass:\n optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get,\n _SoapySDR.ArgInfo_optionNames_set)\n __swig_destroy__ = _SoapySDR.delete_ArgInfo\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
SoapySDRKwargs(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargs, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargs_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargs___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargs___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargs___len__(self)\n\n def __iter__(self):\n return self.key_iterator()\n\n def iterkeys(self):\n return self.key_iterator()\n\n def itervalues(self):\n return self.value_iterator()\n\n def iteritems(self):\n return self.iterator()\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)\n\n def has_key(self, *args):\n return _SoapySDR.SoapySDRKwargs_has_key(self, *args)\n\n def keys(self):\n return _SoapySDR.SoapySDRKwargs_keys(self)\n\n def values(self):\n return _SoapySDR.SoapySDRKwargs_values(self)\n\n def items(self):\n return _SoapySDR.SoapySDRKwargs_items(self)\n\n def __contains__(self, *args):\n return _SoapySDR.SoapySDRKwargs___contains__(self, *args)\n\n def key_iterator(self):\n return _SoapySDR.SoapySDRKwargs_key_iterator(self)\n\n def value_iterator(self):\n return _SoapySDR.SoapySDRKwargs_value_iterator(self)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)\n\n def asdict(self):\n return _SoapySDR.SoapySDRKwargs_asdict(self)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargs(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargs_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargs_size(self)\n\n 
def clear(self):\n return _SoapySDR.SoapySDRKwargs_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargs_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargs_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargs_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargs_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargs_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargs_rend(self)\n\n def count(self, *args):\n return _SoapySDR.SoapySDRKwargs_count(self, *args)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargs_erase(self, *args)\n\n def find(self, *args):\n return _SoapySDR.SoapySDRKwargs_find(self, *args)\n\n def lower_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)\n\n def upper_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)\n\n def __str__(self):\n out = list()\n for k, v in self.iteritems():\n out.append('%s=%s' % (k, v))\n return '{' + ', '.join(out) + '}'\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRKwargsList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargsList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargsList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargsList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargsList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargsList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRKwargsList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n\n def 
__setslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargsList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargsList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRKwargsList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargsList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargsList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargsList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRKwargsList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRKwargsList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n\n def insert(self, *args):\n return 
_SoapySDR.SoapySDRKwargsList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRKwargsList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRArgInfoList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self,\n SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRArgInfoList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRArgInfoList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRArgInfoList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRArgInfoList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRArgInfoList_clear(self)\n\n def swap(self, *args):\n return 
_SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRArgInfoList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRArgInfoList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRArgInfoList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRArgInfoList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRArgInfoList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRStringList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return 
_SoapySDR.SoapySDRStringList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRStringList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRStringList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRStringList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRStringList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRStringList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRStringList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRStringList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRStringList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRStringList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRStringList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRStringList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRStringList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRStringList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRStringList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRStringList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRStringList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRStringList_push_back(self, 
*args)\n\n def front(self):\n return _SoapySDR.SoapySDRStringList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRStringList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRStringList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRStringList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRStringList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRRangeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRRangeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRRangeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRRangeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n\n def append(self, 
*args):\n return _SoapySDR.SoapySDRRangeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRRangeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRRangeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRRangeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRRangeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRRangeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRRangeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRRangeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRRangeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRRangeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRRangeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRSizeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: 
_swig_getattr(self, SoapySDRSizeList, name\n )\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRSizeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRSizeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRSizeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = 
_SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, 
*args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\ndef SoapySDR_getAPIVersion():\n return _SoapySDR.SoapySDR_getAPIVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_getABIVersion():\n return _SoapySDR.SoapySDR_getABIVersion()\n\n\n<mask token>\n\n\ndef loadModules():\n return _SoapySDR.loadModules()\n\n\n<mask token>\n\n\ndef formatToSize(*args):\n return _SoapySDR.formatToSize(*args)\n\n\n<mask token>\n\n\ndef ticksToTimeNs(*args):\n return 
_SoapySDR.ticksToTimeNs(*args)\n\n\n<mask token>\n\n\ndef setLogLevel(*args):\n return _SoapySDR.setLogLevel(*args)\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: _SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return 
_SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return _SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def 
getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return _SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def 
getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return _SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return 
_SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return _SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return _SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return 
self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\nclass Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef _swig_setattr(self, class_type, name, value):\n return _swig_setattr_nondynamic(self, class_type, name, value, 0)\n\n\ndef _swig_getattr(self, class_type, name):\n if name == 'thisown':\n return self.this.own()\n method = class_type.__swig_getmethods__.get(name, None)\n if method:\n return method(self)\n raise AttributeError(name)\n\n\ndef _swig_repr(self):\n try:\n strthis = 'proxy of ' + self.this.__repr__()\n except:\n strthis = ''\n return '<%s.%s; %s >' % (self.__class__.__module__, self.__class__.\n __name__, strthis)\n\n\n<mask token>\n\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined - class is abstract')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_SwigPyIterator\n __del__ = lambda self: None\n\n def value(self):\n return _SoapySDR.SwigPyIterator_value(self)\n\n def incr(self, n=1):\n return _SoapySDR.SwigPyIterator_incr(self, n)\n\n def decr(self, n=1):\n return _SoapySDR.SwigPyIterator_decr(self, n)\n\n def distance(self, *args):\n return _SoapySDR.SwigPyIterator_distance(self, *args)\n\n def equal(self, *args):\n return _SoapySDR.SwigPyIterator_equal(self, *args)\n\n def copy(self):\n return _SoapySDR.SwigPyIterator_copy(self)\n\n def next(self):\n return _SoapySDR.SwigPyIterator_next(self)\n\n def __next__(self):\n return _SoapySDR.SwigPyIterator___next__(self)\n\n def previous(self):\n return _SoapySDR.SwigPyIterator_previous(self)\n\n def advance(self, *args):\n return _SoapySDR.SwigPyIterator_advance(self, *args)\n\n def __eq__(self, *args):\n return _SoapySDR.SwigPyIterator___eq__(self, *args)\n\n def __ne__(self, *args):\n return _SoapySDR.SwigPyIterator___ne__(self, *args)\n\n def __iadd__(self, *args):\n 
return _SoapySDR.SwigPyIterator___iadd__(self, *args)\n\n def __isub__(self, *args):\n return _SoapySDR.SwigPyIterator___isub__(self, *args)\n\n def __add__(self, *args):\n return _SoapySDR.SwigPyIterator___add__(self, *args)\n\n def __sub__(self, *args):\n return _SoapySDR.SwigPyIterator___sub__(self, *args)\n\n def __iter__(self):\n return self\n\n\n<mask token>\n\n\ndef KwargsToString(*args):\n return _SoapySDR.KwargsToString(*args)\n\n\n<mask token>\n\n\nclass Range(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name,\n value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Range, name)\n __repr__ = _swig_repr\n\n def __init__(self, *args):\n this = _SoapySDR.new_Range(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def minimum(self):\n return _SoapySDR.Range_minimum(self)\n\n def maximum(self):\n return _SoapySDR.Range_maximum(self)\n\n def step(self):\n return _SoapySDR.Range_step(self)\n\n def __str__(self):\n fields = [self.minimum(), self.maximum()]\n if self.step() != 0.0:\n fields.append(self.step())\n return ', '.join([('%g' % f) for f in fields])\n __swig_destroy__ = _SoapySDR.delete_Range\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass ArgInfo(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_ArgInfo()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['key'] = _SoapySDR.ArgInfo_key_set\n __swig_getmethods__['key'] = _SoapySDR.ArgInfo_key_get\n if _newclass:\n key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.\n ArgInfo_key_set)\n __swig_setmethods__['value'] = _SoapySDR.ArgInfo_value_set\n __swig_getmethods__['value'] = _SoapySDR.ArgInfo_value_get\n 
if _newclass:\n value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.\n ArgInfo_value_set)\n __swig_setmethods__['name'] = _SoapySDR.ArgInfo_name_set\n __swig_getmethods__['name'] = _SoapySDR.ArgInfo_name_get\n if _newclass:\n name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.\n ArgInfo_name_set)\n __swig_setmethods__['description'] = _SoapySDR.ArgInfo_description_set\n __swig_getmethods__['description'] = _SoapySDR.ArgInfo_description_get\n if _newclass:\n description = _swig_property(_SoapySDR.ArgInfo_description_get,\n _SoapySDR.ArgInfo_description_set)\n __swig_setmethods__['units'] = _SoapySDR.ArgInfo_units_set\n __swig_getmethods__['units'] = _SoapySDR.ArgInfo_units_get\n if _newclass:\n units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.\n ArgInfo_units_set)\n BOOL = _SoapySDR.ArgInfo_BOOL\n INT = _SoapySDR.ArgInfo_INT\n FLOAT = _SoapySDR.ArgInfo_FLOAT\n STRING = _SoapySDR.ArgInfo_STRING\n __swig_setmethods__['type'] = _SoapySDR.ArgInfo_type_set\n __swig_getmethods__['type'] = _SoapySDR.ArgInfo_type_get\n if _newclass:\n type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.\n ArgInfo_type_set)\n __swig_setmethods__['range'] = _SoapySDR.ArgInfo_range_set\n __swig_getmethods__['range'] = _SoapySDR.ArgInfo_range_get\n if _newclass:\n range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.\n ArgInfo_range_set)\n __swig_setmethods__['options'] = _SoapySDR.ArgInfo_options_set\n __swig_getmethods__['options'] = _SoapySDR.ArgInfo_options_get\n if _newclass:\n options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.\n ArgInfo_options_set)\n __swig_setmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_set\n __swig_getmethods__['optionNames'] = _SoapySDR.ArgInfo_optionNames_get\n if _newclass:\n optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get,\n _SoapySDR.ArgInfo_optionNames_set)\n __swig_destroy__ = _SoapySDR.delete_ArgInfo\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
SoapySDRKwargs(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargs, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargs_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargs___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargs___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargs___len__(self)\n\n def __iter__(self):\n return self.key_iterator()\n\n def iterkeys(self):\n return self.key_iterator()\n\n def itervalues(self):\n return self.value_iterator()\n\n def iteritems(self):\n return self.iterator()\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)\n\n def has_key(self, *args):\n return _SoapySDR.SoapySDRKwargs_has_key(self, *args)\n\n def keys(self):\n return _SoapySDR.SoapySDRKwargs_keys(self)\n\n def values(self):\n return _SoapySDR.SoapySDRKwargs_values(self)\n\n def items(self):\n return _SoapySDR.SoapySDRKwargs_items(self)\n\n def __contains__(self, *args):\n return _SoapySDR.SoapySDRKwargs___contains__(self, *args)\n\n def key_iterator(self):\n return _SoapySDR.SoapySDRKwargs_key_iterator(self)\n\n def value_iterator(self):\n return _SoapySDR.SoapySDRKwargs_value_iterator(self)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)\n\n def asdict(self):\n return _SoapySDR.SoapySDRKwargs_asdict(self)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargs(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargs_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargs_size(self)\n\n 
def clear(self):\n return _SoapySDR.SoapySDRKwargs_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargs_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargs_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargs_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargs_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargs_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargs_rend(self)\n\n def count(self, *args):\n return _SoapySDR.SoapySDRKwargs_count(self, *args)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargs_erase(self, *args)\n\n def find(self, *args):\n return _SoapySDR.SoapySDRKwargs_find(self, *args)\n\n def lower_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)\n\n def upper_bound(self, *args):\n return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)\n\n def __str__(self):\n out = list()\n for k, v in self.iteritems():\n out.append('%s=%s' % (k, v))\n return '{' + ', '.join(out) + '}'\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRKwargsList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRKwargsList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRKwargsList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRKwargsList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRKwargsList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRKwargsList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRKwargsList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n\n def 
__setslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRKwargsList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRKwargsList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRKwargsList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRKwargsList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRKwargsList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRKwargsList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRKwargsList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRKwargsList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n\n def insert(self, *args):\n return 
_SoapySDR.SoapySDRKwargsList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRKwargsList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRArgInfoList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self,\n SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRArgInfoList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRArgInfoList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRArgInfoList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRArgInfoList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRArgInfoList_clear(self)\n\n def swap(self, *args):\n return 
_SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRArgInfoList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRArgInfoList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRArgInfoList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRArgInfoList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRArgInfoList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRStringList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return 
_SoapySDR.SoapySDRStringList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRStringList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRStringList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRStringList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRStringList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRStringList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRStringList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRStringList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRStringList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRStringList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRStringList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRStringList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRStringList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRStringList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRStringList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRStringList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRStringList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRStringList_push_back(self, 
*args)\n\n def front(self):\n return _SoapySDR.SoapySDRStringList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRStringList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRStringList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRStringList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRStringList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRRangeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRRangeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRRangeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRRangeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n\n def append(self, 
*args):\n return _SoapySDR.SoapySDRRangeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRRangeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRRangeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRRangeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRRangeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRRangeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRRangeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRRangeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRRangeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRRangeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRRangeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRSizeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: 
_swig_getattr(self, SoapySDRSizeList, name\n )\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRSizeList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRSizeList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRSizeList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRSizeList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRSizeList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRSizeList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRSizeList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRSizeList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRSizeList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRSizeList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRSizeList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRSizeList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRSizeList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n\n def __init__(self, *args):\n this = 
_SoapySDR.new_SoapySDRSizeList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRSizeList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRSizeList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList,\n name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _SoapySDR.SoapySDRDoubleList_iterator(self)\n\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n\n def __bool__(self):\n return _SoapySDR.SoapySDRDoubleList___bool__(self)\n\n def __len__(self):\n return _SoapySDR.SoapySDRDoubleList___len__(self)\n\n def pop(self):\n return _SoapySDR.SoapySDRDoubleList_pop(self)\n\n def __getslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n\n def __setslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n\n def __delslice__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n\n def __delitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n\n def __getitem__(self, 
*args):\n return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n\n def append(self, *args):\n return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n\n def empty(self):\n return _SoapySDR.SoapySDRDoubleList_empty(self)\n\n def size(self):\n return _SoapySDR.SoapySDRDoubleList_size(self)\n\n def clear(self):\n return _SoapySDR.SoapySDRDoubleList_clear(self)\n\n def swap(self, *args):\n return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n\n def get_allocator(self):\n return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n\n def begin(self):\n return _SoapySDR.SoapySDRDoubleList_begin(self)\n\n def end(self):\n return _SoapySDR.SoapySDRDoubleList_end(self)\n\n def rbegin(self):\n return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n\n def rend(self):\n return _SoapySDR.SoapySDRDoubleList_rend(self)\n\n def pop_back(self):\n return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n\n def erase(self, *args):\n return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n\n def __init__(self, *args):\n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try:\n self.this.append(this)\n except:\n self.this = this\n\n def push_back(self, *args):\n return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n\n def front(self):\n return _SoapySDR.SoapySDRDoubleList_front(self)\n\n def back(self):\n return _SoapySDR.SoapySDRDoubleList_back(self)\n\n def assign(self, *args):\n return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n\n def resize(self, *args):\n return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n\n def insert(self, *args):\n return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n\n def reserve(self, *args):\n return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n\n def capacity(self):\n return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self: None\n\n\n<mask token>\n\n\nclass 
StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self,\n StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n\n def __init__(self):\n this = _SoapySDR.new_StreamResult()\n try:\n self.this.append(this)\n except:\n self.this = this\n __swig_setmethods__['ret'] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__['ret'] = _SoapySDR.StreamResult_ret_get\n if _newclass:\n ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.\n StreamResult_ret_set)\n __swig_setmethods__['flags'] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__['flags'] = _SoapySDR.StreamResult_flags_get\n if _newclass:\n flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.\n StreamResult_flags_set)\n __swig_setmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__['timeNs'] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:\n timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get,\n _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__['chanMask'] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:\n chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get,\n _SoapySDR.StreamResult_chanMask_set)\n\n def __str__(self):\n return 'ret=%s, flags=%s, timeNs=%s' % (self.ret, self.flags, self.\n timeNs)\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self: None\n\n\n<mask token>\n\n\ndef SoapySDR_errToStr(*args):\n return _SoapySDR.SoapySDR_errToStr(*args)\n\n\n<mask token>\n\n\ndef SoapySDR_getAPIVersion():\n return _SoapySDR.SoapySDR_getAPIVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_getABIVersion():\n return _SoapySDR.SoapySDR_getABIVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_getLibVersion():\n return _SoapySDR.SoapySDR_getLibVersion()\n\n\n<mask token>\n\n\ndef SoapySDR_log(*args):\n 
return _SoapySDR.SoapySDR_log(*args)\n\n\n<mask token>\n\n\ndef SoapySDR_setLogLevel(*args):\n return _SoapySDR.SoapySDR_setLogLevel(*args)\n\n\n<mask token>\n\n\ndef errToStr(*args):\n return _SoapySDR.errToStr(*args)\n\n\n<mask token>\n\n\ndef getAPIVersion():\n return _SoapySDR.getAPIVersion()\n\n\n<mask token>\n\n\ndef getABIVersion():\n return _SoapySDR.getABIVersion()\n\n\n<mask token>\n\n\ndef getLibVersion():\n return _SoapySDR.getLibVersion()\n\n\n<mask token>\n\n\ndef getRootPath():\n return _SoapySDR.getRootPath()\n\n\n<mask token>\n\n\ndef listSearchPaths():\n return _SoapySDR.listSearchPaths()\n\n\n<mask token>\n\n\ndef listModules(*args):\n return _SoapySDR.listModules(*args)\n\n\n<mask token>\n\n\ndef loadModule(*args):\n return _SoapySDR.loadModule(*args)\n\n\n<mask token>\n\n\ndef getLoaderResult(*args):\n return _SoapySDR.getLoaderResult(*args)\n\n\n<mask token>\n\n\ndef unloadModule(*args):\n return _SoapySDR.unloadModule(*args)\n\n\n<mask token>\n\n\ndef loadModules():\n return _SoapySDR.loadModules()\n\n\n<mask token>\n\n\ndef formatToSize(*args):\n return _SoapySDR.formatToSize(*args)\n\n\n<mask token>\n\n\ndef ticksToTimeNs(*args):\n return _SoapySDR.ticksToTimeNs(*args)\n\n\n<mask token>\n\n\ndef log(*args):\n return _SoapySDR.log(*args)\n\n\n<mask token>\n\n\ndef setLogLevel(*args):\n return _SoapySDR.setLogLevel(*args)\n\n\n<mask token>\n\n\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device,\n name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Device, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError('No constructor defined')\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self: None\n __swig_getmethods__['enumerate'] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:\n enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__['make'] = lambda x: 
_SoapySDR.Device_make\n if _newclass:\n make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__['unmake'] = lambda x: _SoapySDR.Device_unmake\n if _newclass:\n unmake = staticmethod(_SoapySDR.Device_unmake)\n\n def getDriverKey(self):\n return _SoapySDR.Device_getDriverKey(self)\n\n def getHardwareKey(self):\n return _SoapySDR.Device_getHardwareKey(self)\n\n def getHardwareInfo(self):\n return _SoapySDR.Device_getHardwareInfo(self)\n\n def setFrontendMapping(self, *args):\n return _SoapySDR.Device_setFrontendMapping(self, *args)\n\n def getFrontendMapping(self, *args):\n return _SoapySDR.Device_getFrontendMapping(self, *args)\n\n def getNumChannels(self, *args):\n return _SoapySDR.Device_getNumChannels(self, *args)\n\n def getChannelInfo(self, *args):\n return _SoapySDR.Device_getChannelInfo(self, *args)\n\n def getFullDuplex(self, *args):\n return _SoapySDR.Device_getFullDuplex(self, *args)\n\n def getStreamFormats(self, *args):\n return _SoapySDR.Device_getStreamFormats(self, *args)\n\n def getNativeStreamFormat(self, *args):\n return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n\n def getStreamArgsInfo(self, *args):\n return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n\n def setupStream(self, *args):\n return _SoapySDR.Device_setupStream(self, *args)\n\n def closeStream(self, *args):\n return _SoapySDR.Device_closeStream(self, *args)\n\n def getStreamMTU(self, *args):\n return _SoapySDR.Device_getStreamMTU(self, *args)\n\n def activateStream(self, *args):\n return _SoapySDR.Device_activateStream(self, *args)\n\n def deactivateStream(self, *args):\n return _SoapySDR.Device_deactivateStream(self, *args)\n\n def readStream(self, *args):\n return _SoapySDR.Device_readStream(self, *args)\n\n def writeStream(self, *args):\n return _SoapySDR.Device_writeStream(self, *args)\n\n def readStreamStatus(self, *args):\n return _SoapySDR.Device_readStreamStatus(self, *args)\n\n def getNumDirectAccessBuffers(self, *args):\n return 
_SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n\n def getDirectAccessBufferAddrs(self, *args):\n return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n\n def acquireReadBuffer(self, *args):\n return _SoapySDR.Device_acquireReadBuffer(self, *args)\n\n def releaseReadBuffer(self, *args):\n return _SoapySDR.Device_releaseReadBuffer(self, *args)\n\n def acquireWriteBuffer(self, *args):\n return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n\n def releaseWriteBuffer(self, *args):\n return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n\n def listAntennas(self, *args):\n return _SoapySDR.Device_listAntennas(self, *args)\n\n def setAntenna(self, *args):\n return _SoapySDR.Device_setAntenna(self, *args)\n\n def getAntenna(self, *args):\n return _SoapySDR.Device_getAntenna(self, *args)\n\n def hasDCOffsetMode(self, *args):\n return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n\n def setDCOffsetMode(self, *args):\n return _SoapySDR.Device_setDCOffsetMode(self, *args)\n\n def getDCOffsetMode(self, *args):\n return _SoapySDR.Device_getDCOffsetMode(self, *args)\n\n def hasDCOffset(self, *args):\n return _SoapySDR.Device_hasDCOffset(self, *args)\n\n def setDCOffset(self, *args):\n return _SoapySDR.Device_setDCOffset(self, *args)\n\n def getDCOffset(self, *args):\n return _SoapySDR.Device_getDCOffset(self, *args)\n\n def hasIQBalance(self, *args):\n return _SoapySDR.Device_hasIQBalance(self, *args)\n\n def setIQBalance(self, *args):\n return _SoapySDR.Device_setIQBalance(self, *args)\n\n def getIQBalance(self, *args):\n return _SoapySDR.Device_getIQBalance(self, *args)\n\n def hasFrequencyCorrection(self, *args):\n return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n\n def setFrequencyCorrection(self, *args):\n return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n\n def getFrequencyCorrection(self, *args):\n return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n\n def listGains(self, *args):\n return 
_SoapySDR.Device_listGains(self, *args)\n\n def hasGainMode(self, *args):\n return _SoapySDR.Device_hasGainMode(self, *args)\n\n def setGainMode(self, *args):\n return _SoapySDR.Device_setGainMode(self, *args)\n\n def getGainMode(self, *args):\n return _SoapySDR.Device_getGainMode(self, *args)\n\n def setGain(self, *args):\n return _SoapySDR.Device_setGain(self, *args)\n\n def getGain(self, *args):\n return _SoapySDR.Device_getGain(self, *args)\n\n def getGainRange(self, *args):\n return _SoapySDR.Device_getGainRange(self, *args)\n\n def setFrequency(self, *args):\n return _SoapySDR.Device_setFrequency(self, *args)\n\n def getFrequency(self, *args):\n return _SoapySDR.Device_getFrequency(self, *args)\n\n def listFrequencies(self, *args):\n return _SoapySDR.Device_listFrequencies(self, *args)\n\n def getFrequencyRange(self, *args):\n return _SoapySDR.Device_getFrequencyRange(self, *args)\n\n def getFrequencyArgsInfo(self, *args):\n return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n\n def setSampleRate(self, *args):\n return _SoapySDR.Device_setSampleRate(self, *args)\n\n def getSampleRate(self, *args):\n return _SoapySDR.Device_getSampleRate(self, *args)\n\n def listSampleRates(self, *args):\n return _SoapySDR.Device_listSampleRates(self, *args)\n\n def getSampleRateRange(self, *args):\n return _SoapySDR.Device_getSampleRateRange(self, *args)\n\n def setBandwidth(self, *args):\n return _SoapySDR.Device_setBandwidth(self, *args)\n\n def getBandwidth(self, *args):\n return _SoapySDR.Device_getBandwidth(self, *args)\n\n def listBandwidths(self, *args):\n return _SoapySDR.Device_listBandwidths(self, *args)\n\n def getBandwidthRange(self, *args):\n return _SoapySDR.Device_getBandwidthRange(self, *args)\n\n def setMasterClockRate(self, *args):\n return _SoapySDR.Device_setMasterClockRate(self, *args)\n\n def getMasterClockRate(self):\n return _SoapySDR.Device_getMasterClockRate(self)\n\n def getMasterClockRates(self):\n return 
_SoapySDR.Device_getMasterClockRates(self)\n\n def listClockSources(self):\n return _SoapySDR.Device_listClockSources(self)\n\n def setClockSource(self, *args):\n return _SoapySDR.Device_setClockSource(self, *args)\n\n def getClockSource(self):\n return _SoapySDR.Device_getClockSource(self)\n\n def listTimeSources(self):\n return _SoapySDR.Device_listTimeSources(self)\n\n def setTimeSource(self, *args):\n return _SoapySDR.Device_setTimeSource(self, *args)\n\n def getTimeSource(self):\n return _SoapySDR.Device_getTimeSource(self)\n\n def hasHardwareTime(self, what=''):\n return _SoapySDR.Device_hasHardwareTime(self, what)\n\n def getHardwareTime(self, what=''):\n return _SoapySDR.Device_getHardwareTime(self, what)\n\n def setHardwareTime(self, *args):\n return _SoapySDR.Device_setHardwareTime(self, *args)\n\n def setCommandTime(self, *args):\n return _SoapySDR.Device_setCommandTime(self, *args)\n\n def listSensors(self, *args):\n return _SoapySDR.Device_listSensors(self, *args)\n\n def getSensorInfo(self, *args):\n return _SoapySDR.Device_getSensorInfo(self, *args)\n\n def readSensor(self, *args):\n return _SoapySDR.Device_readSensor(self, *args)\n\n def listRegisterInterfaces(self):\n return _SoapySDR.Device_listRegisterInterfaces(self)\n\n def writeRegister(self, *args):\n return _SoapySDR.Device_writeRegister(self, *args)\n\n def readRegister(self, *args):\n return _SoapySDR.Device_readRegister(self, *args)\n\n def writeRegisters(self, *args):\n return _SoapySDR.Device_writeRegisters(self, *args)\n\n def readRegisters(self, *args):\n return _SoapySDR.Device_readRegisters(self, *args)\n\n def getSettingInfo(self, *args):\n return _SoapySDR.Device_getSettingInfo(self, *args)\n\n def writeSetting(self, *args):\n return _SoapySDR.Device_writeSetting(self, *args)\n\n def readSetting(self, *args):\n return _SoapySDR.Device_readSetting(self, *args)\n\n def listGPIOBanks(self):\n return _SoapySDR.Device_listGPIOBanks(self)\n\n def writeGPIO(self, *args):\n return 
_SoapySDR.Device_writeGPIO(self, *args)\n\n def readGPIO(self, *args):\n return _SoapySDR.Device_readGPIO(self, *args)\n\n def writeGPIODir(self, *args):\n return _SoapySDR.Device_writeGPIODir(self, *args)\n\n def readGPIODir(self, *args):\n return _SoapySDR.Device_readGPIODir(self, *args)\n\n def writeI2C(self, *args):\n return _SoapySDR.Device_writeI2C(self, *args)\n\n def readI2C(self, *args):\n return _SoapySDR.Device_readI2C(self, *args)\n\n def transactSPI(self, *args):\n return _SoapySDR.Device_transactSPI(self, *args)\n\n def listUARTs(self):\n return _SoapySDR.Device_listUARTs(self)\n\n def writeUART(self, *args):\n return _SoapySDR.Device_writeUART(self, *args)\n\n def readUART(self, *args):\n return _SoapySDR.Device_readUART(self, *args)\n\n def readStream__(self, *args):\n return _SoapySDR.Device_readStream__(self, *args)\n\n def writeStream__(self, *args):\n return _SoapySDR.Device_writeStream__(self, *args)\n\n def readStreamStatus__(self, *args):\n return _SoapySDR.Device_readStreamStatus__(self, *args)\n\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return '%s:%s' % (self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags=0, timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags=0, timeNs=0,\n timeoutUs=100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs,\n timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs=100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\n\n<mask token>\n\n\ndef Device_enumerate(*args):\n return _SoapySDR.Device_enumerate(*args)\n\n\n<mask token>\n\n\ndef Device_make(*args):\n return _SoapySDR.Device_make(*args)\n\n\n<mask token>\n\n\ndef Device_unmake(*args):\n return _SoapySDR.Device_unmake(*args)\n\n\n<mask token>\n\n\nclass 
Device(Device):\n\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\n\ndef extractBuffPointer(buff):\n if hasattr(buff, '__array_interface__'):\n return buff.__array_interface__['data'][0]\n if hasattr(buff, '__long__'):\n return long(buff)\n if hasattr(buff, '__int__'):\n return int(buff)\n raise Exception('Unrecognized data format: ' + str(type(buff)))\n",
"step-5": "# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 2.0.12\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\n\n\n\n\nfrom sys import version_info\nif version_info >= (2,6,0):\n def swig_import_helper():\n from os.path import dirname\n import imp\n fp = None\n try:\n fp, pathname, description = imp.find_module('_SoapySDR', [dirname(__file__)])\n except ImportError:\n import _SoapySDR\n return _SoapySDR\n if fp is not None:\n try:\n _mod = imp.load_module('_SoapySDR', fp, pathname, description)\n finally:\n fp.close()\n return _mod\n _SoapySDR = swig_import_helper()\n del swig_import_helper\nelse:\n import _SoapySDR\ndel version_info\ntry:\n _swig_property = property\nexcept NameError:\n pass # Python < 2.2 doesn't have 'property'.\ndef _swig_setattr_nondynamic(self,class_type,name,value,static=1):\n if (name == \"thisown\"): return self.this.own(value)\n if (name == \"this\"):\n if type(value).__name__ == 'SwigPyObject':\n self.__dict__[name] = value\n return\n method = class_type.__swig_setmethods__.get(name,None)\n if method: return method(self,value)\n if (not static):\n self.__dict__[name] = value\n else:\n raise AttributeError(\"You cannot add attributes to %s\" % self)\n\ndef _swig_setattr(self,class_type,name,value):\n return _swig_setattr_nondynamic(self,class_type,name,value,0)\n\ndef _swig_getattr(self,class_type,name):\n if (name == \"thisown\"): return self.this.own()\n method = class_type.__swig_getmethods__.get(name,None)\n if method: return method(self)\n raise AttributeError(name)\n\ndef _swig_repr(self):\n try: strthis = \"proxy of \" + self.this.__repr__()\n except: strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\ntry:\n _object = object\n _newclass = 1\nexcept AttributeError:\n class _object : pass\n _newclass = 0\n\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n 
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n def __init__(self, *args, **kwargs): raise AttributeError(\"No constructor defined - class is abstract\")\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_SwigPyIterator\n __del__ = lambda self : None;\n def value(self): return _SoapySDR.SwigPyIterator_value(self)\n def incr(self, n=1): return _SoapySDR.SwigPyIterator_incr(self, n)\n def decr(self, n=1): return _SoapySDR.SwigPyIterator_decr(self, n)\n def distance(self, *args): return _SoapySDR.SwigPyIterator_distance(self, *args)\n def equal(self, *args): return _SoapySDR.SwigPyIterator_equal(self, *args)\n def copy(self): return _SoapySDR.SwigPyIterator_copy(self)\n def next(self): return _SoapySDR.SwigPyIterator_next(self)\n def __next__(self): return _SoapySDR.SwigPyIterator___next__(self)\n def previous(self): return _SoapySDR.SwigPyIterator_previous(self)\n def advance(self, *args): return _SoapySDR.SwigPyIterator_advance(self, *args)\n def __eq__(self, *args): return _SoapySDR.SwigPyIterator___eq__(self, *args)\n def __ne__(self, *args): return _SoapySDR.SwigPyIterator___ne__(self, *args)\n def __iadd__(self, *args): return _SoapySDR.SwigPyIterator___iadd__(self, *args)\n def __isub__(self, *args): return _SoapySDR.SwigPyIterator___isub__(self, *args)\n def __add__(self, *args): return _SoapySDR.SwigPyIterator___add__(self, *args)\n def __sub__(self, *args): return _SoapySDR.SwigPyIterator___sub__(self, *args)\n def __iter__(self): return self\nSwigPyIterator_swigregister = _SoapySDR.SwigPyIterator_swigregister\nSwigPyIterator_swigregister(SwigPyIterator)\n\n\ndef KwargsFromString(*args):\n return _SoapySDR.KwargsFromString(*args)\nKwargsFromString = _SoapySDR.KwargsFromString\n\ndef KwargsToString(*args):\n return _SoapySDR.KwargsToString(*args)\nKwargsToString = _SoapySDR.KwargsToString\nclass 
Range(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Range, name)\n __repr__ = _swig_repr\n def __init__(self, *args): \n this = _SoapySDR.new_Range(*args)\n try: self.this.append(this)\n except: self.this = this\n def minimum(self): return _SoapySDR.Range_minimum(self)\n def maximum(self): return _SoapySDR.Range_maximum(self)\n def step(self): return _SoapySDR.Range_step(self)\n def __str__(self):\n fields = [self.minimum(), self.maximum()]\n if self.step() != 0.0: fields.append(self.step())\n return ', '.join(['%g'%f for f in fields])\n\n __swig_destroy__ = _SoapySDR.delete_Range\n __del__ = lambda self : None;\nRange_swigregister = _SoapySDR.Range_swigregister\nRange_swigregister(Range)\n\nclass ArgInfo(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)\n __repr__ = _swig_repr\n def __init__(self): \n this = _SoapySDR.new_ArgInfo()\n try: self.this.append(this)\n except: self.this = this\n __swig_setmethods__[\"key\"] = _SoapySDR.ArgInfo_key_set\n __swig_getmethods__[\"key\"] = _SoapySDR.ArgInfo_key_get\n if _newclass:key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.ArgInfo_key_set)\n __swig_setmethods__[\"value\"] = _SoapySDR.ArgInfo_value_set\n __swig_getmethods__[\"value\"] = _SoapySDR.ArgInfo_value_get\n if _newclass:value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.ArgInfo_value_set)\n __swig_setmethods__[\"name\"] = _SoapySDR.ArgInfo_name_set\n __swig_getmethods__[\"name\"] = _SoapySDR.ArgInfo_name_get\n if _newclass:name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.ArgInfo_name_set)\n __swig_setmethods__[\"description\"] = _SoapySDR.ArgInfo_description_set\n __swig_getmethods__[\"description\"] = 
_SoapySDR.ArgInfo_description_get\n if _newclass:description = _swig_property(_SoapySDR.ArgInfo_description_get, _SoapySDR.ArgInfo_description_set)\n __swig_setmethods__[\"units\"] = _SoapySDR.ArgInfo_units_set\n __swig_getmethods__[\"units\"] = _SoapySDR.ArgInfo_units_get\n if _newclass:units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.ArgInfo_units_set)\n BOOL = _SoapySDR.ArgInfo_BOOL\n INT = _SoapySDR.ArgInfo_INT\n FLOAT = _SoapySDR.ArgInfo_FLOAT\n STRING = _SoapySDR.ArgInfo_STRING\n __swig_setmethods__[\"type\"] = _SoapySDR.ArgInfo_type_set\n __swig_getmethods__[\"type\"] = _SoapySDR.ArgInfo_type_get\n if _newclass:type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.ArgInfo_type_set)\n __swig_setmethods__[\"range\"] = _SoapySDR.ArgInfo_range_set\n __swig_getmethods__[\"range\"] = _SoapySDR.ArgInfo_range_get\n if _newclass:range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.ArgInfo_range_set)\n __swig_setmethods__[\"options\"] = _SoapySDR.ArgInfo_options_set\n __swig_getmethods__[\"options\"] = _SoapySDR.ArgInfo_options_get\n if _newclass:options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.ArgInfo_options_set)\n __swig_setmethods__[\"optionNames\"] = _SoapySDR.ArgInfo_optionNames_set\n __swig_getmethods__[\"optionNames\"] = _SoapySDR.ArgInfo_optionNames_get\n if _newclass:optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get, _SoapySDR.ArgInfo_optionNames_set)\n __swig_destroy__ = _SoapySDR.delete_ArgInfo\n __del__ = lambda self : None;\nArgInfo_swigregister = _SoapySDR.ArgInfo_swigregister\nArgInfo_swigregister(ArgInfo)\n\nclass SoapySDRKwargs(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargs, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRKwargs_iterator(self)\n def __iter__(self): return self.iterator()\n 
def __nonzero__(self): return _SoapySDR.SoapySDRKwargs___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRKwargs___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRKwargs___len__(self)\n def __iter__(self): return self.key_iterator()\n def iterkeys(self): return self.key_iterator()\n def itervalues(self): return self.value_iterator()\n def iteritems(self): return self.iterator()\n def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)\n def has_key(self, *args): return _SoapySDR.SoapySDRKwargs_has_key(self, *args)\n def keys(self): return _SoapySDR.SoapySDRKwargs_keys(self)\n def values(self): return _SoapySDR.SoapySDRKwargs_values(self)\n def items(self): return _SoapySDR.SoapySDRKwargs_items(self)\n def __contains__(self, *args): return _SoapySDR.SoapySDRKwargs___contains__(self, *args)\n def key_iterator(self): return _SoapySDR.SoapySDRKwargs_key_iterator(self)\n def value_iterator(self): return _SoapySDR.SoapySDRKwargs_value_iterator(self)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)\n def asdict(self): return _SoapySDR.SoapySDRKwargs_asdict(self)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRKwargs(*args)\n try: self.this.append(this)\n except: self.this = this\n def empty(self): return _SoapySDR.SoapySDRKwargs_empty(self)\n def size(self): return _SoapySDR.SoapySDRKwargs_size(self)\n def clear(self): return _SoapySDR.SoapySDRKwargs_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRKwargs_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRKwargs_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRKwargs_begin(self)\n def end(self): return _SoapySDR.SoapySDRKwargs_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRKwargs_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRKwargs_rend(self)\n def count(self, 
*args): return _SoapySDR.SoapySDRKwargs_count(self, *args)\n def erase(self, *args): return _SoapySDR.SoapySDRKwargs_erase(self, *args)\n def find(self, *args): return _SoapySDR.SoapySDRKwargs_find(self, *args)\n def lower_bound(self, *args): return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)\n def upper_bound(self, *args): return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)\n def __str__(self):\n out = list()\n for k, v in self.iteritems():\n out.append(\"%s=%s\"%(k, v))\n return '{'+(', '.join(out))+'}'\n\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs\n __del__ = lambda self : None;\nSoapySDRKwargs_swigregister = _SoapySDR.SoapySDRKwargs_swigregister\nSoapySDRKwargs_swigregister(SoapySDRKwargs)\n\nclass SoapySDRKwargsList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargsList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRKwargsList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRKwargsList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRKwargsList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRKwargsList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRKwargsList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)\n def 
append(self, *args): return _SoapySDR.SoapySDRKwargsList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRKwargsList_empty(self)\n def size(self): return _SoapySDR.SoapySDRKwargsList_size(self)\n def clear(self): return _SoapySDR.SoapySDRKwargsList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRKwargsList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRKwargsList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRKwargsList_begin(self)\n def end(self): return _SoapySDR.SoapySDRKwargsList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRKwargsList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRKwargsList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRKwargsList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRKwargsList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRKwargsList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRKwargsList_front(self)\n def back(self): return _SoapySDR.SoapySDRKwargsList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRKwargsList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRKwargsList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRKwargsList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRKwargsList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList\n __del__ = lambda self : None;\nSoapySDRKwargsList_swigregister = _SoapySDR.SoapySDRKwargsList_swigregister\nSoapySDRKwargsList_swigregister(SoapySDRKwargsList)\n\nclass SoapySDRArgInfoList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRArgInfoList, name, 
value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRArgInfoList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRArgInfoList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRArgInfoList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRArgInfoList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRArgInfoList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRArgInfoList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRArgInfoList_empty(self)\n def size(self): return _SoapySDR.SoapySDRArgInfoList_size(self)\n def clear(self): return _SoapySDR.SoapySDRArgInfoList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRArgInfoList_begin(self)\n def end(self): return _SoapySDR.SoapySDRArgInfoList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRArgInfoList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRArgInfoList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRArgInfoList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)\n def 
__init__(self, *args): \n this = _SoapySDR.new_SoapySDRArgInfoList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRArgInfoList_front(self)\n def back(self): return _SoapySDR.SoapySDRArgInfoList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRArgInfoList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList\n __del__ = lambda self : None;\nSoapySDRArgInfoList_swigregister = _SoapySDR.SoapySDRArgInfoList_swigregister\nSoapySDRArgInfoList_swigregister(SoapySDRArgInfoList)\n\nclass SoapySDRStringList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRStringList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRStringList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRStringList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRStringList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRStringList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRStringList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRStringList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRStringList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRStringList___delslice__(self, *args)\n def __delitem__(self, 
*args): return _SoapySDR.SoapySDRStringList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRStringList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRStringList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRStringList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRStringList_empty(self)\n def size(self): return _SoapySDR.SoapySDRStringList_size(self)\n def clear(self): return _SoapySDR.SoapySDRStringList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRStringList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRStringList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRStringList_begin(self)\n def end(self): return _SoapySDR.SoapySDRStringList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRStringList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRStringList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRStringList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRStringList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRStringList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRStringList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRStringList_front(self)\n def back(self): return _SoapySDR.SoapySDRStringList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRStringList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRStringList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRStringList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRStringList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRStringList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList\n __del__ = lambda self : 
None;\nSoapySDRStringList_swigregister = _SoapySDR.SoapySDRStringList_swigregister\nSoapySDRStringList_swigregister(SoapySDRStringList)\n\nclass SoapySDRRangeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRRangeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRRangeList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRRangeList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRRangeList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRRangeList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRRangeList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRRangeList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRRangeList_empty(self)\n def size(self): return _SoapySDR.SoapySDRRangeList_size(self)\n def clear(self): return _SoapySDR.SoapySDRRangeList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRRangeList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRRangeList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRRangeList_begin(self)\n def end(self): return _SoapySDR.SoapySDRRangeList_end(self)\n def rbegin(self): return 
_SoapySDR.SoapySDRRangeList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRRangeList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRRangeList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRRangeList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRRangeList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRRangeList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRRangeList_front(self)\n def back(self): return _SoapySDR.SoapySDRRangeList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRRangeList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRRangeList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRRangeList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRRangeList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRRangeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList\n __del__ = lambda self : None;\nSoapySDRRangeList_swigregister = _SoapySDR.SoapySDRRangeList_swigregister\nSoapySDRRangeList_swigregister(SoapySDRRangeList)\n\nclass SoapySDRSizeList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRSizeList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRSizeList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRSizeList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRSizeList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRSizeList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRSizeList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRSizeList___getslice__(self, 
*args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRSizeList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRSizeList_empty(self)\n def size(self): return _SoapySDR.SoapySDRSizeList_size(self)\n def clear(self): return _SoapySDR.SoapySDRSizeList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRSizeList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRSizeList_get_allocator(self)\n def begin(self): return _SoapySDR.SoapySDRSizeList_begin(self)\n def end(self): return _SoapySDR.SoapySDRSizeList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRSizeList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRSizeList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRSizeList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRSizeList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRSizeList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRSizeList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRSizeList_front(self)\n def back(self): return _SoapySDR.SoapySDRSizeList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRSizeList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRSizeList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRSizeList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRSizeList_reserve(self, *args)\n def 
capacity(self): return _SoapySDR.SoapySDRSizeList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList\n __del__ = lambda self : None;\nSoapySDRSizeList_swigregister = _SoapySDR.SoapySDRSizeList_swigregister\nSoapySDRSizeList_swigregister(SoapySDRSizeList)\n\nclass SoapySDRDoubleList(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRDoubleList, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList, name)\n __repr__ = _swig_repr\n def iterator(self): return _SoapySDR.SoapySDRDoubleList_iterator(self)\n def __iter__(self): return self.iterator()\n def __nonzero__(self): return _SoapySDR.SoapySDRDoubleList___nonzero__(self)\n def __bool__(self): return _SoapySDR.SoapySDRDoubleList___bool__(self)\n def __len__(self): return _SoapySDR.SoapySDRDoubleList___len__(self)\n def pop(self): return _SoapySDR.SoapySDRDoubleList_pop(self)\n def __getslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)\n def __setslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)\n def __delslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)\n def __delitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)\n def __getitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)\n def __setitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)\n def append(self, *args): return _SoapySDR.SoapySDRDoubleList_append(self, *args)\n def empty(self): return _SoapySDR.SoapySDRDoubleList_empty(self)\n def size(self): return _SoapySDR.SoapySDRDoubleList_size(self)\n def clear(self): return _SoapySDR.SoapySDRDoubleList_clear(self)\n def swap(self, *args): return _SoapySDR.SoapySDRDoubleList_swap(self, *args)\n def get_allocator(self): return _SoapySDR.SoapySDRDoubleList_get_allocator(self)\n def begin(self): 
return _SoapySDR.SoapySDRDoubleList_begin(self)\n def end(self): return _SoapySDR.SoapySDRDoubleList_end(self)\n def rbegin(self): return _SoapySDR.SoapySDRDoubleList_rbegin(self)\n def rend(self): return _SoapySDR.SoapySDRDoubleList_rend(self)\n def pop_back(self): return _SoapySDR.SoapySDRDoubleList_pop_back(self)\n def erase(self, *args): return _SoapySDR.SoapySDRDoubleList_erase(self, *args)\n def __init__(self, *args): \n this = _SoapySDR.new_SoapySDRDoubleList(*args)\n try: self.this.append(this)\n except: self.this = this\n def push_back(self, *args): return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)\n def front(self): return _SoapySDR.SoapySDRDoubleList_front(self)\n def back(self): return _SoapySDR.SoapySDRDoubleList_back(self)\n def assign(self, *args): return _SoapySDR.SoapySDRDoubleList_assign(self, *args)\n def resize(self, *args): return _SoapySDR.SoapySDRDoubleList_resize(self, *args)\n def insert(self, *args): return _SoapySDR.SoapySDRDoubleList_insert(self, *args)\n def reserve(self, *args): return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)\n def capacity(self): return _SoapySDR.SoapySDRDoubleList_capacity(self)\n __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList\n __del__ = lambda self : None;\nSoapySDRDoubleList_swigregister = _SoapySDR.SoapySDRDoubleList_swigregister\nSoapySDRDoubleList_swigregister(SoapySDRDoubleList)\n\nclass StreamResult(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, StreamResult, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)\n __repr__ = _swig_repr\n def __init__(self): \n this = _SoapySDR.new_StreamResult()\n try: self.this.append(this)\n except: self.this = this\n __swig_setmethods__[\"ret\"] = _SoapySDR.StreamResult_ret_set\n __swig_getmethods__[\"ret\"] = _SoapySDR.StreamResult_ret_get\n if _newclass:ret = _swig_property(_SoapySDR.StreamResult_ret_get, 
_SoapySDR.StreamResult_ret_set)\n __swig_setmethods__[\"flags\"] = _SoapySDR.StreamResult_flags_set\n __swig_getmethods__[\"flags\"] = _SoapySDR.StreamResult_flags_get\n if _newclass:flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.StreamResult_flags_set)\n __swig_setmethods__[\"timeNs\"] = _SoapySDR.StreamResult_timeNs_set\n __swig_getmethods__[\"timeNs\"] = _SoapySDR.StreamResult_timeNs_get\n if _newclass:timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get, _SoapySDR.StreamResult_timeNs_set)\n __swig_setmethods__[\"chanMask\"] = _SoapySDR.StreamResult_chanMask_set\n __swig_getmethods__[\"chanMask\"] = _SoapySDR.StreamResult_chanMask_get\n if _newclass:chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get, _SoapySDR.StreamResult_chanMask_set)\n def __str__(self):\n return \"ret=%s, flags=%s, timeNs=%s\"%(self.ret, self.flags, self.timeNs)\n\n __swig_destroy__ = _SoapySDR.delete_StreamResult\n __del__ = lambda self : None;\nStreamResult_swigregister = _SoapySDR.StreamResult_swigregister\nStreamResult_swigregister(StreamResult)\n\nSOAPY_SDR_TX = _SoapySDR.SOAPY_SDR_TX\nSOAPY_SDR_RX = _SoapySDR.SOAPY_SDR_RX\nSOAPY_SDR_END_BURST = _SoapySDR.SOAPY_SDR_END_BURST\nSOAPY_SDR_HAS_TIME = _SoapySDR.SOAPY_SDR_HAS_TIME\nSOAPY_SDR_END_ABRUPT = _SoapySDR.SOAPY_SDR_END_ABRUPT\nSOAPY_SDR_ONE_PACKET = _SoapySDR.SOAPY_SDR_ONE_PACKET\nSOAPY_SDR_MORE_FRAGMENTS = _SoapySDR.SOAPY_SDR_MORE_FRAGMENTS\nSOAPY_SDR_WAIT_TRIGGER = _SoapySDR.SOAPY_SDR_WAIT_TRIGGER\n\ndef SoapySDR_errToStr(*args):\n return _SoapySDR.SoapySDR_errToStr(*args)\nSoapySDR_errToStr = _SoapySDR.SoapySDR_errToStr\nSOAPY_SDR_TIMEOUT = _SoapySDR.SOAPY_SDR_TIMEOUT\nSOAPY_SDR_STREAM_ERROR = _SoapySDR.SOAPY_SDR_STREAM_ERROR\nSOAPY_SDR_CORRUPTION = _SoapySDR.SOAPY_SDR_CORRUPTION\nSOAPY_SDR_OVERFLOW = _SoapySDR.SOAPY_SDR_OVERFLOW\nSOAPY_SDR_NOT_SUPPORTED = _SoapySDR.SOAPY_SDR_NOT_SUPPORTED\nSOAPY_SDR_TIME_ERROR = _SoapySDR.SOAPY_SDR_TIME_ERROR\nSOAPY_SDR_UNDERFLOW = 
_SoapySDR.SOAPY_SDR_UNDERFLOW\nSOAPY_SDR_API_VERSION = _SoapySDR.SOAPY_SDR_API_VERSION\nSOAPY_SDR_ABI_VERSION = _SoapySDR.SOAPY_SDR_ABI_VERSION\n\ndef SoapySDR_getAPIVersion():\n return _SoapySDR.SoapySDR_getAPIVersion()\nSoapySDR_getAPIVersion = _SoapySDR.SoapySDR_getAPIVersion\n\ndef SoapySDR_getABIVersion():\n return _SoapySDR.SoapySDR_getABIVersion()\nSoapySDR_getABIVersion = _SoapySDR.SoapySDR_getABIVersion\n\ndef SoapySDR_getLibVersion():\n return _SoapySDR.SoapySDR_getLibVersion()\nSoapySDR_getLibVersion = _SoapySDR.SoapySDR_getLibVersion\nSOAPY_SDR_CF64 = _SoapySDR.SOAPY_SDR_CF64\nSOAPY_SDR_CF32 = _SoapySDR.SOAPY_SDR_CF32\nSOAPY_SDR_CS32 = _SoapySDR.SOAPY_SDR_CS32\nSOAPY_SDR_CU32 = _SoapySDR.SOAPY_SDR_CU32\nSOAPY_SDR_CS16 = _SoapySDR.SOAPY_SDR_CS16\nSOAPY_SDR_CU16 = _SoapySDR.SOAPY_SDR_CU16\nSOAPY_SDR_CS12 = _SoapySDR.SOAPY_SDR_CS12\nSOAPY_SDR_CU12 = _SoapySDR.SOAPY_SDR_CU12\nSOAPY_SDR_CS8 = _SoapySDR.SOAPY_SDR_CS8\nSOAPY_SDR_CU8 = _SoapySDR.SOAPY_SDR_CU8\nSOAPY_SDR_CS4 = _SoapySDR.SOAPY_SDR_CS4\nSOAPY_SDR_CU4 = _SoapySDR.SOAPY_SDR_CU4\nSOAPY_SDR_F64 = _SoapySDR.SOAPY_SDR_F64\nSOAPY_SDR_F32 = _SoapySDR.SOAPY_SDR_F32\nSOAPY_SDR_S32 = _SoapySDR.SOAPY_SDR_S32\nSOAPY_SDR_U32 = _SoapySDR.SOAPY_SDR_U32\nSOAPY_SDR_S16 = _SoapySDR.SOAPY_SDR_S16\nSOAPY_SDR_U16 = _SoapySDR.SOAPY_SDR_U16\nSOAPY_SDR_S8 = _SoapySDR.SOAPY_SDR_S8\nSOAPY_SDR_U8 = _SoapySDR.SOAPY_SDR_U8\n\ndef SoapySDR_formatToSize(*args):\n return _SoapySDR.SoapySDR_formatToSize(*args)\nSoapySDR_formatToSize = _SoapySDR.SoapySDR_formatToSize\nSOAPY_SDR_FATAL = _SoapySDR.SOAPY_SDR_FATAL\nSOAPY_SDR_CRITICAL = _SoapySDR.SOAPY_SDR_CRITICAL\nSOAPY_SDR_ERROR = _SoapySDR.SOAPY_SDR_ERROR\nSOAPY_SDR_WARNING = _SoapySDR.SOAPY_SDR_WARNING\nSOAPY_SDR_NOTICE = _SoapySDR.SOAPY_SDR_NOTICE\nSOAPY_SDR_INFO = _SoapySDR.SOAPY_SDR_INFO\nSOAPY_SDR_DEBUG = _SoapySDR.SOAPY_SDR_DEBUG\nSOAPY_SDR_TRACE = _SoapySDR.SOAPY_SDR_TRACE\nSOAPY_SDR_SSI = _SoapySDR.SOAPY_SDR_SSI\n\ndef SoapySDR_log(*args):\n return 
_SoapySDR.SoapySDR_log(*args)\nSoapySDR_log = _SoapySDR.SoapySDR_log\n\ndef SoapySDR_setLogLevel(*args):\n return _SoapySDR.SoapySDR_setLogLevel(*args)\nSoapySDR_setLogLevel = _SoapySDR.SoapySDR_setLogLevel\n\ndef errToStr(*args):\n return _SoapySDR.errToStr(*args)\nerrToStr = _SoapySDR.errToStr\n\ndef getAPIVersion():\n return _SoapySDR.getAPIVersion()\ngetAPIVersion = _SoapySDR.getAPIVersion\n\ndef getABIVersion():\n return _SoapySDR.getABIVersion()\ngetABIVersion = _SoapySDR.getABIVersion\n\ndef getLibVersion():\n return _SoapySDR.getLibVersion()\ngetLibVersion = _SoapySDR.getLibVersion\n\ndef getRootPath():\n return _SoapySDR.getRootPath()\ngetRootPath = _SoapySDR.getRootPath\n\ndef listSearchPaths():\n return _SoapySDR.listSearchPaths()\nlistSearchPaths = _SoapySDR.listSearchPaths\n\ndef listModules(*args):\n return _SoapySDR.listModules(*args)\nlistModules = _SoapySDR.listModules\n\ndef loadModule(*args):\n return _SoapySDR.loadModule(*args)\nloadModule = _SoapySDR.loadModule\n\ndef getLoaderResult(*args):\n return _SoapySDR.getLoaderResult(*args)\ngetLoaderResult = _SoapySDR.getLoaderResult\n\ndef unloadModule(*args):\n return _SoapySDR.unloadModule(*args)\nunloadModule = _SoapySDR.unloadModule\n\ndef loadModules():\n return _SoapySDR.loadModules()\nloadModules = _SoapySDR.loadModules\n\ndef formatToSize(*args):\n return _SoapySDR.formatToSize(*args)\nformatToSize = _SoapySDR.formatToSize\n\ndef ticksToTimeNs(*args):\n return _SoapySDR.ticksToTimeNs(*args)\nticksToTimeNs = _SoapySDR.ticksToTimeNs\n\ndef timeNsToTicks(*args):\n return _SoapySDR.timeNsToTicks(*args)\ntimeNsToTicks = _SoapySDR.timeNsToTicks\n\ndef log(*args):\n return _SoapySDR.log(*args)\nlog = _SoapySDR.log\n\ndef setLogLevel(*args):\n return _SoapySDR.setLogLevel(*args)\nsetLogLevel = _SoapySDR.setLogLevel\nclass Device(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Device, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda 
self, name: _swig_getattr(self, Device, name)\n def __init__(self, *args, **kwargs): raise AttributeError(\"No constructor defined\")\n __repr__ = _swig_repr\n __swig_destroy__ = _SoapySDR.delete_Device\n __del__ = lambda self : None;\n __swig_getmethods__[\"enumerate\"] = lambda x: _SoapySDR.Device_enumerate\n if _newclass:enumerate = staticmethod(_SoapySDR.Device_enumerate)\n __swig_getmethods__[\"make\"] = lambda x: _SoapySDR.Device_make\n if _newclass:make = staticmethod(_SoapySDR.Device_make)\n __swig_getmethods__[\"unmake\"] = lambda x: _SoapySDR.Device_unmake\n if _newclass:unmake = staticmethod(_SoapySDR.Device_unmake)\n def getDriverKey(self): return _SoapySDR.Device_getDriverKey(self)\n def getHardwareKey(self): return _SoapySDR.Device_getHardwareKey(self)\n def getHardwareInfo(self): return _SoapySDR.Device_getHardwareInfo(self)\n def setFrontendMapping(self, *args): return _SoapySDR.Device_setFrontendMapping(self, *args)\n def getFrontendMapping(self, *args): return _SoapySDR.Device_getFrontendMapping(self, *args)\n def getNumChannels(self, *args): return _SoapySDR.Device_getNumChannels(self, *args)\n def getChannelInfo(self, *args): return _SoapySDR.Device_getChannelInfo(self, *args)\n def getFullDuplex(self, *args): return _SoapySDR.Device_getFullDuplex(self, *args)\n def getStreamFormats(self, *args): return _SoapySDR.Device_getStreamFormats(self, *args)\n def getNativeStreamFormat(self, *args): return _SoapySDR.Device_getNativeStreamFormat(self, *args)\n def getStreamArgsInfo(self, *args): return _SoapySDR.Device_getStreamArgsInfo(self, *args)\n def setupStream(self, *args): return _SoapySDR.Device_setupStream(self, *args)\n def closeStream(self, *args): return _SoapySDR.Device_closeStream(self, *args)\n def getStreamMTU(self, *args): return _SoapySDR.Device_getStreamMTU(self, *args)\n def activateStream(self, *args): return _SoapySDR.Device_activateStream(self, *args)\n def deactivateStream(self, *args): return 
_SoapySDR.Device_deactivateStream(self, *args)\n def readStream(self, *args): return _SoapySDR.Device_readStream(self, *args)\n def writeStream(self, *args): return _SoapySDR.Device_writeStream(self, *args)\n def readStreamStatus(self, *args): return _SoapySDR.Device_readStreamStatus(self, *args)\n def getNumDirectAccessBuffers(self, *args): return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)\n def getDirectAccessBufferAddrs(self, *args): return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)\n def acquireReadBuffer(self, *args): return _SoapySDR.Device_acquireReadBuffer(self, *args)\n def releaseReadBuffer(self, *args): return _SoapySDR.Device_releaseReadBuffer(self, *args)\n def acquireWriteBuffer(self, *args): return _SoapySDR.Device_acquireWriteBuffer(self, *args)\n def releaseWriteBuffer(self, *args): return _SoapySDR.Device_releaseWriteBuffer(self, *args)\n def listAntennas(self, *args): return _SoapySDR.Device_listAntennas(self, *args)\n def setAntenna(self, *args): return _SoapySDR.Device_setAntenna(self, *args)\n def getAntenna(self, *args): return _SoapySDR.Device_getAntenna(self, *args)\n def hasDCOffsetMode(self, *args): return _SoapySDR.Device_hasDCOffsetMode(self, *args)\n def setDCOffsetMode(self, *args): return _SoapySDR.Device_setDCOffsetMode(self, *args)\n def getDCOffsetMode(self, *args): return _SoapySDR.Device_getDCOffsetMode(self, *args)\n def hasDCOffset(self, *args): return _SoapySDR.Device_hasDCOffset(self, *args)\n def setDCOffset(self, *args): return _SoapySDR.Device_setDCOffset(self, *args)\n def getDCOffset(self, *args): return _SoapySDR.Device_getDCOffset(self, *args)\n def hasIQBalance(self, *args): return _SoapySDR.Device_hasIQBalance(self, *args)\n def setIQBalance(self, *args): return _SoapySDR.Device_setIQBalance(self, *args)\n def getIQBalance(self, *args): return _SoapySDR.Device_getIQBalance(self, *args)\n def hasFrequencyCorrection(self, *args): return _SoapySDR.Device_hasFrequencyCorrection(self, *args)\n 
def setFrequencyCorrection(self, *args): return _SoapySDR.Device_setFrequencyCorrection(self, *args)\n def getFrequencyCorrection(self, *args): return _SoapySDR.Device_getFrequencyCorrection(self, *args)\n def listGains(self, *args): return _SoapySDR.Device_listGains(self, *args)\n def hasGainMode(self, *args): return _SoapySDR.Device_hasGainMode(self, *args)\n def setGainMode(self, *args): return _SoapySDR.Device_setGainMode(self, *args)\n def getGainMode(self, *args): return _SoapySDR.Device_getGainMode(self, *args)\n def setGain(self, *args): return _SoapySDR.Device_setGain(self, *args)\n def getGain(self, *args): return _SoapySDR.Device_getGain(self, *args)\n def getGainRange(self, *args): return _SoapySDR.Device_getGainRange(self, *args)\n def setFrequency(self, *args): return _SoapySDR.Device_setFrequency(self, *args)\n def getFrequency(self, *args): return _SoapySDR.Device_getFrequency(self, *args)\n def listFrequencies(self, *args): return _SoapySDR.Device_listFrequencies(self, *args)\n def getFrequencyRange(self, *args): return _SoapySDR.Device_getFrequencyRange(self, *args)\n def getFrequencyArgsInfo(self, *args): return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)\n def setSampleRate(self, *args): return _SoapySDR.Device_setSampleRate(self, *args)\n def getSampleRate(self, *args): return _SoapySDR.Device_getSampleRate(self, *args)\n def listSampleRates(self, *args): return _SoapySDR.Device_listSampleRates(self, *args)\n def getSampleRateRange(self, *args): return _SoapySDR.Device_getSampleRateRange(self, *args)\n def setBandwidth(self, *args): return _SoapySDR.Device_setBandwidth(self, *args)\n def getBandwidth(self, *args): return _SoapySDR.Device_getBandwidth(self, *args)\n def listBandwidths(self, *args): return _SoapySDR.Device_listBandwidths(self, *args)\n def getBandwidthRange(self, *args): return _SoapySDR.Device_getBandwidthRange(self, *args)\n def setMasterClockRate(self, *args): return _SoapySDR.Device_setMasterClockRate(self, *args)\n 
def getMasterClockRate(self): return _SoapySDR.Device_getMasterClockRate(self)\n def getMasterClockRates(self): return _SoapySDR.Device_getMasterClockRates(self)\n def listClockSources(self): return _SoapySDR.Device_listClockSources(self)\n def setClockSource(self, *args): return _SoapySDR.Device_setClockSource(self, *args)\n def getClockSource(self): return _SoapySDR.Device_getClockSource(self)\n def listTimeSources(self): return _SoapySDR.Device_listTimeSources(self)\n def setTimeSource(self, *args): return _SoapySDR.Device_setTimeSource(self, *args)\n def getTimeSource(self): return _SoapySDR.Device_getTimeSource(self)\n def hasHardwareTime(self, what=\"\"): return _SoapySDR.Device_hasHardwareTime(self, what)\n def getHardwareTime(self, what=\"\"): return _SoapySDR.Device_getHardwareTime(self, what)\n def setHardwareTime(self, *args): return _SoapySDR.Device_setHardwareTime(self, *args)\n def setCommandTime(self, *args): return _SoapySDR.Device_setCommandTime(self, *args)\n def listSensors(self, *args): return _SoapySDR.Device_listSensors(self, *args)\n def getSensorInfo(self, *args): return _SoapySDR.Device_getSensorInfo(self, *args)\n def readSensor(self, *args): return _SoapySDR.Device_readSensor(self, *args)\n def listRegisterInterfaces(self): return _SoapySDR.Device_listRegisterInterfaces(self)\n def writeRegister(self, *args): return _SoapySDR.Device_writeRegister(self, *args)\n def readRegister(self, *args): return _SoapySDR.Device_readRegister(self, *args)\n def writeRegisters(self, *args): return _SoapySDR.Device_writeRegisters(self, *args)\n def readRegisters(self, *args): return _SoapySDR.Device_readRegisters(self, *args)\n def getSettingInfo(self, *args): return _SoapySDR.Device_getSettingInfo(self, *args)\n def writeSetting(self, *args): return _SoapySDR.Device_writeSetting(self, *args)\n def readSetting(self, *args): return _SoapySDR.Device_readSetting(self, *args)\n def listGPIOBanks(self): return _SoapySDR.Device_listGPIOBanks(self)\n def 
writeGPIO(self, *args): return _SoapySDR.Device_writeGPIO(self, *args)\n def readGPIO(self, *args): return _SoapySDR.Device_readGPIO(self, *args)\n def writeGPIODir(self, *args): return _SoapySDR.Device_writeGPIODir(self, *args)\n def readGPIODir(self, *args): return _SoapySDR.Device_readGPIODir(self, *args)\n def writeI2C(self, *args): return _SoapySDR.Device_writeI2C(self, *args)\n def readI2C(self, *args): return _SoapySDR.Device_readI2C(self, *args)\n def transactSPI(self, *args): return _SoapySDR.Device_transactSPI(self, *args)\n def listUARTs(self): return _SoapySDR.Device_listUARTs(self)\n def writeUART(self, *args): return _SoapySDR.Device_writeUART(self, *args)\n def readUART(self, *args): return _SoapySDR.Device_readUART(self, *args)\n def readStream__(self, *args): return _SoapySDR.Device_readStream__(self, *args)\n def writeStream__(self, *args): return _SoapySDR.Device_writeStream__(self, *args)\n def readStreamStatus__(self, *args): return _SoapySDR.Device_readStreamStatus__(self, *args)\n #call unmake from custom deleter\n def __del__(self):\n Device.unmake(self)\n\n def __str__(self):\n return \"%s:%s\"%(self.getDriverKey(), self.getHardwareKey())\n\n def readStream(self, stream, buffs, numElems, flags = 0, timeoutUs = 100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)\n\n def writeStream(self, stream, buffs, numElems, flags = 0, timeNs = 0, timeoutUs = 100000):\n ptrs = [extractBuffPointer(b) for b in buffs]\n return self.writeStream__(stream, ptrs, numElems, flags, timeNs, timeoutUs)\n\n def readStreamStatus(self, stream, timeoutUs = 100000):\n return self.readStreamStatus__(stream, timeoutUs)\n\nDevice_swigregister = _SoapySDR.Device_swigregister\nDevice_swigregister(Device)\n\ndef Device_enumerate(*args):\n return _SoapySDR.Device_enumerate(*args)\nDevice_enumerate = _SoapySDR.Device_enumerate\n\ndef Device_make(*args):\n return _SoapySDR.Device_make(*args)\nDevice_make = 
_SoapySDR.Device_make\n\ndef Device_unmake(*args):\n return _SoapySDR.Device_unmake(*args)\nDevice_unmake = _SoapySDR.Device_unmake\n\n__all__ = list()\nfor key in sorted(globals().keys()):\n if key.startswith('SOAPY_SDR_'):\n __all__.append(key)\n\n_Device = Device\nclass Device(Device):\n def __new__(cls, *args, **kwargs):\n return cls.make(*args, **kwargs)\n\ndef extractBuffPointer(buff):\n if hasattr(buff, '__array_interface__'): return buff.__array_interface__['data'][0]\n if hasattr(buff, '__long__'): return long(buff)\n if hasattr(buff, '__int__'): return int(buff)\n raise Exception(\"Unrecognized data format: \" + str(type(buff)))\n\n# This file is compatible with both classic and new-style classes.\n\n\n",
"step-ids": [
177,
316,
400,
419,
427
]
}
|
[
177,
316,
400,
419,
427
] |
from flask import Flask, render_template
serious12 = Flask(__name__)
@serious12.route("/")
def home():
return "HOME"
@serious12.route("/user/<username>")
def user(username):
user = {
"trung": {
"name": "Trung",
"age": 19,
"birthplace": "Hanoi"
},
"nguyenvana": {
"name": "A",
"age": 69,
"birthplace": "Trai Dat"
}
}
return render_template("user.html", user = user)
if __name__ == "__main__":
serious12.run(debug=True)
|
normal
|
{
"blob_id": "db1b6c545555116a334061440614e83e62994838",
"index": 4440,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef home():\n return 'HOME'\n\n\[email protected]('/user/<username>')\ndef user(username):\n user = {'trung': {'name': 'Trung', 'age': 19, 'birthplace': 'Hanoi'},\n 'nguyenvana': {'name': 'A', 'age': 69, 'birthplace': 'Trai Dat'}}\n return render_template('user.html', user=user)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef home():\n return 'HOME'\n\n\[email protected]('/user/<username>')\ndef user(username):\n user = {'trung': {'name': 'Trung', 'age': 19, 'birthplace': 'Hanoi'},\n 'nguyenvana': {'name': 'A', 'age': 69, 'birthplace': 'Trai Dat'}}\n return render_template('user.html', user=user)\n\n\nif __name__ == '__main__':\n serious12.run(debug=True)\n",
"step-3": "<mask token>\nserious12 = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return 'HOME'\n\n\[email protected]('/user/<username>')\ndef user(username):\n user = {'trung': {'name': 'Trung', 'age': 19, 'birthplace': 'Hanoi'},\n 'nguyenvana': {'name': 'A', 'age': 69, 'birthplace': 'Trai Dat'}}\n return render_template('user.html', user=user)\n\n\nif __name__ == '__main__':\n serious12.run(debug=True)\n",
"step-4": "from flask import Flask, render_template\nserious12 = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return 'HOME'\n\n\[email protected]('/user/<username>')\ndef user(username):\n user = {'trung': {'name': 'Trung', 'age': 19, 'birthplace': 'Hanoi'},\n 'nguyenvana': {'name': 'A', 'age': 69, 'birthplace': 'Trai Dat'}}\n return render_template('user.html', user=user)\n\n\nif __name__ == '__main__':\n serious12.run(debug=True)\n",
"step-5": "from flask import Flask, render_template\n\nserious12 = Flask(__name__)\n\[email protected](\"/\")\ndef home():\n return \"HOME\"\n\[email protected](\"/user/<username>\")\ndef user(username):\n user = {\n \"trung\": {\n \"name\": \"Trung\",\n \"age\": 19,\n \"birthplace\": \"Hanoi\"\n },\n \"nguyenvana\": {\n \"name\": \"A\",\n \"age\": 69,\n \"birthplace\": \"Trai Dat\"\n }\n }\n return render_template(\"user.html\", user = user)\nif __name__ == \"__main__\": \n serious12.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
df1 = pd.read_csv('Tweets1.csv', names=['tweet'])
df2 = pd.read_csv('Tweets2.csv', names=['tweet'])
df3 = pd.read_csv('Tweets3.csv', names=['tweet'])
df = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False, copy=True)
df.to_csv('Tweets.csv', index=None, header=None)
|
normal
|
{
"blob_id": "7d6196268b85861e76efaa53e14976f2eae09405",
"index": 3226,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf.to_csv('Tweets.csv', index=None, header=None)\n",
"step-3": "<mask token>\ndf1 = pd.read_csv('Tweets1.csv', names=['tweet'])\ndf2 = pd.read_csv('Tweets2.csv', names=['tweet'])\ndf3 = pd.read_csv('Tweets3.csv', names=['tweet'])\ndf = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False, copy=True)\ndf.to_csv('Tweets.csv', index=None, header=None)\n",
"step-4": "import pandas as pd\ndf1 = pd.read_csv('Tweets1.csv', names=['tweet'])\ndf2 = pd.read_csv('Tweets2.csv', names=['tweet'])\ndf3 = pd.read_csv('Tweets3.csv', names=['tweet'])\ndf = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False, copy=True)\ndf.to_csv('Tweets.csv', index=None, header=None)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print(sum([int(d) for d in str(pow(2, 1000))]))
|
normal
|
{
"blob_id": "fc0c8deb3a5a57934c9e707911c352af55100c3c",
"index": 3533,
"step-1": "<mask token>\n",
"step-2": "print(sum([int(d) for d in str(pow(2, 1000))]))\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# fmt: off
"""
Every template contains an ordered list of TemplateObjects.
TemplateObject is defined in template_objects.py
GetMemory templates are written for filters and have an answer_type
They represent the action of fetching from the memory using the filters.
Examples:
[Human, QueryBotCurrentAction],
- human: what are you doing
- human: what are you up to
[Human, QueryBot, MoveTarget],
- human: where you going
- human: where are you heading
"""
from template_objects import *
ANSWER_WITH_CORRECTION = [
## what is this + the thing at location ##
[[Human, What, Is, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## what size is X + the thing at location ##
[[Human, AskSize, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## what color is X + the thing at location ##
[[Human, AskColour, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
# Is X Y ##
[[Human, AskIs, BlockObjectThis, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## Is X a Y ##
[[Human, AskIs, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
]
ANSWER_TEMPLATES = [
# 1
## What is X ##
[Human, What, Is, BlockObjectThis],
[Human, What, Is, BlockObjectThis, AbstractDescription],
[Human, What, Is, BlockObjectThat],
[Human, What, Is, BlockObjectThat, AbstractDescription],
# 2
## What is at X ##
[Human, What, Is, BlockObjectLocation],
[Human, What, Is, The, AbstractDescription, BlockObjectLocation],
## What do you see at X ##
[Human, WhatSee, BlockObjectLocation],
# 3
# What size is X ##
[Human, AskSize, BlockObjectThis],
[Human, AskSize, BlockObjectThis, AbstractDescription],
[Human, AskSize, BlockObjectThis, ConcreteDescription],
[Human, AskSize, BlockObjectThat],
[Human, AskSize, BlockObjectThat, AbstractDescription],
[Human, AskSize, BlockObjectThat, ConcreteDescription],
# 4
## what size is X at Y ##
[Human, AskSize, The, AbstractDescription, BlockObjectLocation],
[Human, AskSize, The, ConcreteDescription, BlockObjectLocation],
# 5
# What colour is X ##
[Human, AskColour, BlockObjectThis],
[Human, AskColour, BlockObjectThis, AbstractDescription],
[Human, AskColour, BlockObjectThis, ConcreteDescription],
[Human, AskColour, BlockObjectThat],
[Human, AskColour, BlockObjectThat, AbstractDescription],
[Human, AskColour, BlockObjectThat, ConcreteDescription],
# 6
## what colour is X at Y ##
[Human, AskColour, The, AbstractDescription, BlockObjectLocation],
[Human, AskColour, The, ConcreteDescription, BlockObjectLocation],
# 7
## Is X Y ##
[Human, AskIs, BlockObjectThis, Size],
[Human, AskIs, BlockObjectThis, AbstractDescription, Size],
[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
[Human, AskIs, BlockObjectThat, Size],
[Human, AskIs, BlockObjectThat, AbstractDescription, Size],
[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
[Human, AskIs, BlockObjectThis, Colour],
[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
[Human, AskIs, BlockObjectThat, Colour],
[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
# 8
## Is X a Y ##
[Human, AskIs, BlockObjectThis, ConcreteDescription],
[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
[Human, AskIs, BlockObjectThat, ConcreteDescription],
[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
# 9
## IS X at Y Z ##
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],
]
GET_MEMORY_TEMPLATES = [
## What are you Doing (Action name) ##
[Human, QueryBotCurrentAction],
## What are you Building (Action reference object name) ##
[Human, QueryBot, ActionReferenceObjectName],
## Where are you heading (Move target) ##
[Human, QueryBot, MoveTarget],
## Where are you (Bot location) ##
[Human, QueryBot, CurrentLocation],
] + ANSWER_TEMPLATES
|
normal
|
{
"blob_id": "ceb714e949a72f621aec8b8728fbd1201e22afd1",
"index": 8705,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, What, Is,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Size], [\n HumanReplace, 
The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,\n The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, 
BlockObjectThis,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, AbstractDescription, ConcreteDescription], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]]]\nANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,\n BlockObjectThis, AbstractDescription], [Human, What, Is,\n BlockObjectThat], [Human, What, Is, BlockObjectThat,\n AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,\n What, Is, The, AbstractDescription, BlockObjectLocation], [Human,\n WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [\n Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,\n BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat\n ], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,\n AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,\n AbstractDescription, BlockObjectLocation], [Human, AskSize, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskColour,\n BlockObjectThis], [Human, AskColour, BlockObjectThis,\n AbstractDescription], [Human, AskColour, BlockObjectThis,\n ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,\n AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,\n BlockObjectThat, ConcreteDescription], [Human, AskColour, The,\n AbstractDescription, BlockObjectLocation], [Human, AskColour, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskIs,\n BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Size], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Size], [Human, AskIs, BlockObjectThat, Size], [\n Human, AskIs, 
BlockObjectThat, AbstractDescription, Size], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,\n ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,\n BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,\n The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,\n The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,\n BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [Human, AskIs,\n BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,\n AbstractDescription, ConcreteDescription], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, ConcreteDescription]]\nGET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,\n ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,\n QueryBot, CurrentLocation]] + ANSWER_TEMPLATES\n",
"step-3": "<mask token>\nfrom template_objects import *\nANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, What, Is,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, 
BlockObjectThis, Size], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,\n The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n 
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, AbstractDescription, ConcreteDescription], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]]]\nANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,\n BlockObjectThis, AbstractDescription], [Human, What, Is,\n BlockObjectThat], [Human, What, Is, BlockObjectThat,\n AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,\n What, Is, The, AbstractDescription, BlockObjectLocation], [Human,\n WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [\n Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,\n BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat\n ], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,\n AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,\n AbstractDescription, BlockObjectLocation], [Human, AskSize, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskColour,\n BlockObjectThis], [Human, AskColour, BlockObjectThis,\n AbstractDescription], [Human, AskColour, BlockObjectThis,\n ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,\n AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,\n BlockObjectThat, ConcreteDescription], [Human, AskColour, The,\n AbstractDescription, BlockObjectLocation], [Human, AskColour, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskIs,\n BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Size], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Size], [Human, AskIs, 
BlockObjectThat, Size], [\n Human, AskIs, BlockObjectThat, AbstractDescription, Size], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,\n ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,\n BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,\n The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,\n The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,\n BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [Human, AskIs,\n BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,\n AbstractDescription, ConcreteDescription], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, ConcreteDescription]]\nGET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,\n ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,\n QueryBot, CurrentLocation]] + ANSWER_TEMPLATES\n",
"step-4": "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\n# fmt: off\n\"\"\"\nEvery template contains an ordered list of TemplateObjects.\nTemplateObject is defined in template_objects.py\n\nGetMemory templates are written for filters and have an answer_type\nThey represent the action of fetching from the memory using the filters.\n\nExamples:\n\n[Human, QueryBotCurrentAction],\n- human: what are you doing\n- human: what are you up to\n\n[Human, QueryBot, MoveTarget],\n- human: where you going\n- human: where are you heading\n\"\"\"\nfrom template_objects import *\n\nANSWER_WITH_CORRECTION = [\n ## what is this + the thing at location ##\n [[Human, What, Is, BlockObjectThis],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## what size is X + the thing at location ##\n [[Human, AskSize, BlockObjectThis],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## what color is X + the thing at location ##\n [[Human, AskColour, BlockObjectThis],\n [HumanReplace, The, 
AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n # Is X Y ##\n [[Human, AskIs, BlockObjectThis, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n [[Human, AskIs, BlockObjectThis, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],\n 
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## Is X a Y ##\n [[Human, AskIs, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n]\n\nANSWER_TEMPLATES = [\n # 1\n ## What is X ##\n [Human, What, Is, BlockObjectThis],\n [Human, What, Is, BlockObjectThis, AbstractDescription],\n [Human, What, Is, BlockObjectThat],\n [Human, What, Is, BlockObjectThat, AbstractDescription],\n\n # 2\n ## What is at X ##\n [Human, What, Is, BlockObjectLocation],\n [Human, What, Is, The, AbstractDescription, BlockObjectLocation],\n\n ## What do you see at X ##\n [Human, WhatSee, BlockObjectLocation],\n\n # 3\n # What size is X ##\n [Human, AskSize, BlockObjectThis],\n [Human, AskSize, BlockObjectThis, AbstractDescription],\n [Human, AskSize, BlockObjectThis, ConcreteDescription],\n [Human, AskSize, BlockObjectThat],\n [Human, AskSize, BlockObjectThat, 
AbstractDescription],\n [Human, AskSize, BlockObjectThat, ConcreteDescription],\n\n # 4\n ## what size is X at Y ##\n [Human, AskSize, The, AbstractDescription, BlockObjectLocation],\n [Human, AskSize, The, ConcreteDescription, BlockObjectLocation],\n\n # 5\n # What colour is X ##\n [Human, AskColour, BlockObjectThis],\n [Human, AskColour, BlockObjectThis, AbstractDescription],\n [Human, AskColour, BlockObjectThis, ConcreteDescription],\n [Human, AskColour, BlockObjectThat],\n [Human, AskColour, BlockObjectThat, AbstractDescription],\n [Human, AskColour, BlockObjectThat, ConcreteDescription],\n\n # 6\n ## what colour is X at Y ##\n [Human, AskColour, The, AbstractDescription, BlockObjectLocation],\n [Human, AskColour, The, ConcreteDescription, BlockObjectLocation],\n\n # 7\n ## Is X Y ##\n [Human, AskIs, BlockObjectThis, Size],\n [Human, AskIs, BlockObjectThis, AbstractDescription, Size],\n [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],\n [Human, AskIs, BlockObjectThat, Size],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Size],\n [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],\n\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],\n [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],\n\n [Human, AskIs, BlockObjectThis, Colour],\n [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],\n [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],\n [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],\n [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],\n\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],\n [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],\n\n # 8\n ## Is X a Y ##\n [Human, AskIs, BlockObjectThis, ConcreteDescription],\n [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],\n [Human, AskIs, BlockObjectThat, ConcreteDescription],\n [Human, AskIs, 
BlockObjectThat, AbstractDescription, ConcreteDescription],\n\n # 9\n ## IS X at Y Z ##\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],\n\n] \n\nGET_MEMORY_TEMPLATES = [\n ## What are you Doing (Action name) ##\n [Human, QueryBotCurrentAction],\n\n ## What are you Building (Action reference object name) ##\n [Human, QueryBot, ActionReferenceObjectName],\n\n ## Where are you heading (Move target) ##\n [Human, QueryBot, MoveTarget],\n\n ## Where are you (Bot location) ##\n [Human, QueryBot, CurrentLocation],\n] + ANSWER_TEMPLATES\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
from reactivex import interval
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
from reactivex.testing.marbles import marbles_testing
from reactivex.testing.subscription import Subscription
# Shorthand aliases for ReactiveTest's virtual-time message and
# subscription factory helpers used throughout the tests below.
on_next, on_completed, on_error = (
    ReactiveTest.on_next,
    ReactiveTest.on_completed,
    ReactiveTest.on_error,
)
subscribe, subscribed = ReactiveTest.subscribe, ReactiveTest.subscribed
disposed, created = ReactiveTest.disposed, ReactiveTest.created
class TestSwitchMapIndex(unittest.TestCase):
    """Virtual-time tests for ``ops.switch_map_indexed``.

    Every test drives a hot outer observable on a ``TestScheduler`` and
    verifies both the merged message stream and the subscription windows
    of the switched-to inner observables.
    """

    def test_switch_map_indexed_uses_index(self):
        """The project function receives the element and its zero-based index."""
        sched = TestScheduler()
        outer = sched.create_hot_observable(
            on_next(300, "a"),
            on_next(400, "b"),
            on_next(500, "c"),
        )

        def project(value: str, index: int):
            # Tag every inner tick as (outer index, inner tick, outer value).
            return interval(20).pipe(ops.map(lambda tick: (index, tick, value)))

        results = sched.start(
            lambda: outer.pipe(ops.switch_map_indexed(project=project)),
            disposed=580,
        )

        # Each outer emission switches to a fresh interval, silencing the
        # previous one; disposal at 580 truncates the last inner at 3 ticks.
        expected = []
        for index, (base, value, ticks) in enumerate(
            [(300, "a", 4), (400, "b", 4), (500, "c", 3)]
        ):
            expected.extend(
                on_next(base + 20 * (tick + 1), (index, tick, value))
                for tick in range(ticks)
            )
        assert results.messages == expected
        assert outer.subscriptions == [Subscription(200, 580)]

    def test_switch_map_indexed_inner_throws(self):
        """An error on the currently-active inner propagates to the output."""
        ex = "ex"
        sched = TestScheduler()
        inners = [
            sched.create_cold_observable(on_next(100, "a"), on_next(300, "aa")),
            sched.create_cold_observable(on_next(50, "b"), on_error(120, ex)),
            sched.create_cold_observable(
                on_next(50, "wont happen"), on_error(120, "no")
            ),
        ]
        outer = sched.create_hot_observable(
            on_next(250, 0),
            on_next(400, 1),
            on_next(550, 2),
        )

        results = sched.start(
            lambda: outer.pipe(
                ops.switch_map_indexed(lambda value, _index: inners[value])
            )
        )

        # inners[1] errors at 400 + 120 = 520, before the outer emits the
        # index of inners[2] at 550, so the third inner is never subscribed.
        assert results.messages == [
            on_next(350, "a"),
            on_next(450, "b"),
            on_error(520, ex),
        ]
        expected_subs = [[Subscription(250, 400)], [Subscription(400, 520)], []]
        for inner, subs in zip(inners, expected_subs):
            assert inner.subscriptions == subs

    def test_switch_map_indexed_outer_throws(self):
        """An error on the outer observable unsubscribes everything."""
        ex = "ABC"
        sched = TestScheduler()
        inners = [
            sched.create_cold_observable(on_next(100, "a"), on_next(300, "aa")),
            sched.create_cold_observable(on_next(50, "b"), on_error(120, ex)),
            sched.create_cold_observable(
                on_next(50, "wont happen"), on_error(120, "no")
            ),
        ]
        outer = sched.create_hot_observable(
            on_next(250, 0),
            on_next(400, 1),
            on_error(430, ex),
        )

        results = sched.start(
            lambda: outer.pipe(
                ops.switch_map_indexed(lambda value, _index: inners[value])
            )
        )

        # The outer error at 430 tears down the active inner immediately.
        assert results.messages == [on_next(350, "a"), on_error(430, ex)]
        expected_subs = [[Subscription(250, 400)], [Subscription(400, 430)], []]
        for inner, subs in zip(inners, expected_subs):
            assert inner.subscriptions == subs

    def test_switch_map_indexed_no_inner(self):
        """When the outer never emits a value, the mapper is never consulted."""
        sched = TestScheduler()
        outer = sched.create_hot_observable(on_completed(500))
        # Decoy inner which must never be subscribed to.
        inners = [sched.create_cold_observable(on_next(20, 2))]

        results = sched.start(
            lambda: outer.pipe(
                ops.switch_map_indexed(lambda _value, index: inners[index])
            )
        )

        assert results.messages == [on_completed(500)]
        assert outer.subscriptions == [Subscription(200, 500)]
        assert inners[0].subscriptions == []

    def test_switch_map_indexed_inner_completes(self):
        """Inner completions do not terminate the output stream."""
        sched = TestScheduler()
        outer = sched.create_hot_observable(
            on_next(300, "d"),
            on_next(330, "f"),
            on_completed(540),
        )

        def project(value: str, index: int):
            """An inner that completes on its own after two ticks (40 units)."""
            return interval(20).pipe(
                ops.map(lambda tick: (index, tick, value)), ops.take(2)
            )

        results = sched.start(
            lambda: outer.pipe(ops.switch_map_indexed(project))
        )

        # "d" gets one tick out before "f" switches it away at 330; "f"'s
        # inner then self-completes at 370 without closing the output —
        # only the outer completion at 540 ends the stream.
        assert results.messages == [
            on_next(320, (0, 0, "d")),
            on_next(350, (1, 0, "f")),
            on_next(370, (1, 1, "f")),
            on_completed(540),
        ]

    def test_switch_map_default_mapper(self):
        """With no project argument, each emitted element is switched to directly."""
        with marbles_testing(timespan=10) as (start, cold, hot, exp):
            # NOTE: spaces in marble diagrams are ignored by the parser and
            # only serve to align inner timelines with the hot source.
            outer = hot(
                " ---a---b------c-----",
                {
                    "a": cold("          --1--2", None, None),
                    "b": cold("              --1-2-3-4-5|", None, None),
                    "c": cold("                     --1--2", None, None),
                },
                None,
            )
            expected = exp(" -----1---1-2-3--1--2", None, None)

            assert start(outer.pipe(ops.switch_map_indexed())) == expected
|
normal
|
{
"blob_id": "03dd37346ed12bbd66cbebc46fadc37be319b986",
"index": 548,
"step-1": "<mask token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n <mask token>\n <mask token>\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = 
scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-2": "<mask token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = 
[scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n 
xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-3": "<mask token>\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert 
sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = 
scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-4": "import unittest\nfrom reactivex import interval\nfrom reactivex import operators as ops\nfrom reactivex.testing import ReactiveTest, TestScheduler\nfrom reactivex.testing.marbles import marbles_testing\nfrom reactivex.testing.subscription import Subscription\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return 
xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, 
i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-5": "import unittest\n\nfrom reactivex import interval\nfrom reactivex import operators as ops\nfrom reactivex.testing import ReactiveTest, TestScheduler\nfrom reactivex.testing.marbles import marbles_testing\nfrom reactivex.testing.subscription import Subscription\n\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(\n on_next(300, \"a\"),\n on_next(400, \"b\"),\n on_next(500, \"c\"),\n )\n\n def create_inner(x: str, i: int):\n def create_changing(j: int):\n return (i, j, x)\n\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n\n results = scheduler.start(create, disposed=580)\n # (i, j, x): i is the index of the outer emit;\n # j is the value of the inner interval;\n # x is the value of the outer emission\n assert results.messages == [\n on_next(320, (0, 0, \"a\")),\n on_next(340, (0, 1, \"a\")),\n on_next(360, (0, 2, \"a\")),\n on_next(380, (0, 3, \"a\")),\n on_next(420, (1, 0, \"b\")),\n on_next(440, (1, 1, \"b\")),\n on_next(460, (1, 2, \"b\")),\n on_next(480, (1, 3, \"b\")),\n on_next(520, (2, 0, \"c\")),\n on_next(540, (2, 1, \"c\")),\n on_next(560, (2, 2, \"c\")),\n ]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = \"ex\"\n scheduler = TestScheduler()\n sources = [\n scheduler.create_cold_observable(on_next(100, \"a\"), on_next(300, \"aa\")),\n scheduler.create_cold_observable(on_next(50, \"b\"), on_error(120, ex)),\n scheduler.create_cold_observable(\n on_next(50, \"wont happen\"), on_error(120, 
\"no\")\n ),\n ]\n xs = scheduler.create_hot_observable(\n on_next(\n 250,\n 0,\n ),\n on_next(400, 1),\n on_next(\n 550,\n 2,\n ),\n )\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(350, \"a\"),\n on_next(450, \"b\"),\n on_error(520, ex),\n ]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = \"ABC\"\n scheduler = TestScheduler()\n sources = [\n scheduler.create_cold_observable(on_next(100, \"a\"), on_next(300, \"aa\")),\n scheduler.create_cold_observable(on_next(50, \"b\"), on_error(120, ex)),\n scheduler.create_cold_observable(\n on_next(50, \"wont happen\"), on_error(120, \"no\")\n ),\n ]\n xs = scheduler.create_hot_observable(\n on_next(\n 250,\n 0,\n ),\n on_next(400, 1),\n on_error(430, ex),\n )\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(350, \"a\"),\n on_error(430, ex),\n ]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n # Fake inner which should never be subscribed to\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert 
xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(\n on_next(300, \"d\"),\n on_next(330, \"f\"),\n on_completed(540),\n )\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(320, (0, 0, \"d\")),\n on_next(350, (1, 0, \"f\")),\n on_next(\n 370, (1, 1, \"f\")\n ), # here the current inner is unsubscribed but not the outer\n on_completed(540), # only outer completion affects\n ]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(\n \" ---a---b------c-----\",\n {\n \"a\": cold(\" --1--2\", None, None),\n \"b\": cold(\" --1-2-3-4-5|\", None, None),\n \"c\": cold(\" --1--2\", None, None),\n },\n None,\n )\n expected = exp(\" -----1---1-2-3--1--2\", None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
import os
import sys

# Make the lambda package importable regardless of the current working
# directory by putting its absolute path at the front of the module search path.
sys.path.insert(0, os.path.abspath("adjust_schedule_function"))
|
normal
|
{
"blob_id": "19126e5041841ab1320730ae82d66c6900cf31bd",
"index": 9145,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.abspath('adjust_schedule_function'))\n",
"step-3": "import sys, os\nsys.path.insert(0, os.path.abspath('adjust_schedule_function'))\n",
"step-4": "import sys, os\n\nsys.path.insert(0, os.path.abspath(\"adjust_schedule_function\"))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
class User(object):
    """A user record backed by a metadata dict, with Unix-like permission checks."""

    def __init__(self, meta):
        # Every user implicitly belongs to a personal group named after them.
        meta.update({'groups': meta.get('groups', []) + [meta['username']]})
        self.meta = meta

    @property
    def username(self):
        return self.meta['username']

    @property
    def groups(self):
        return self.meta['groups']

    @property
    def home_path(self):
        # Home directories live directly under /home.
        return os.path.join('/home', self.username)

    @property
    def is_root(self):
        # The superuser is identified purely by name.
        return self.username == 'root'

    def own(self, node):
        # root owns everything; otherwise compare owner names.
        return bool(self.is_root or node.owner == self.username)

    def can_read(self, node):
        # root bypasses checks; otherwise owner, group, then "other" read bits.
        return bool(
            self.is_root
            or (self.own(node) and node.owner_readable)
            or (node.group in self.groups and node.group_readable)
            or node.other_readable
        )

    def can_write(self, node):
        # Same cascade as can_read, but for the write bits.
        return bool(
            self.is_root
            or (self.own(node) and node.owner_writable)
            or (node.group in self.groups and node.group_writable)
            or node.other_writable
        )

    def can_create(self, node):
        # Creating a child requires write access on the parent directory.
        return self.can_write(node.parent)

    def can_remove(self, node):
        # Removal likewise only needs write access on the parent.
        return self.can_write(node.parent)

    def __getitem__(self, key):
        return self.meta.__getitem__(key)

    def __repr__(self):
        return repr(self.meta)
root_user = User({'username': 'root'})
|
normal
|
{
"blob_id": "aa47b7c74b9b6b8a7f014de4bd58236edeba485d",
"index": 5971,
"step-1": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n <mask token>\n <mask token>\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n <mask token>\n\n def __repr__(self):\n return repr(self.meta)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n\n def __getitem__(self, key):\n return self.meta.__getitem__(key)\n\n def __repr__(self):\n return repr(self.meta)\n\n\n<mask token>\n",
"step-5": "import os\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({\n 'groups': meta.get('groups', []) + [meta['username']]\n })\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n\n def __getitem__(self, key):\n return self.meta.__getitem__(key)\n\n def __repr__(self):\n return repr(self.meta)\n\n\nroot_user = User({'username': 'root'})\n",
"step-ids": [
9,
11,
12,
13,
16
]
}
|
[
9,
11,
12,
13,
16
] |
def long_alpha(str1):
    """Return the first longest run of non-decreasing characters in *str1*."""
    runs = []
    current = ""
    for ch in str1:
        # A character smaller than the previous one breaks the run.
        if current and ch < current[-1]:
            runs.append(current)
            current = ""
        current += ch
    runs.append(current)
    # max() with key=len returns the first run of maximal length,
    # matching the original strict "maxi < len" comparison.
    return max(runs, key=len)
# Demo: the longest non-decreasing run in "abcaklmoeeffd" is "aklmo".
str1 = "abcaklmoeeffd"
res = long_alpha(str1)
print(res)
|
normal
|
{
"blob_id": "e7c18fa99c801fd959c868954f020d8c55babe0d",
"index": 7543,
"step-1": "<mask token>\n",
"step-2": "def long_alpha(str1):\n list1 = []\n list2 = ''\n maxi = 0\n j = 0\n for i in range(len(str1)):\n if i == 0:\n list2 += str1[i]\n elif ord(str1[i - 1]) <= ord(str1[i]):\n list2 += str1[i]\n else:\n list1.append(list2)\n list2 = ''\n list2 += str1[i]\n list1.append(list2)\n for i in range(len(list1)):\n if maxi < len(list1[i]):\n maxi = len(list1[i])\n j = i\n return list1[j]\n\n\n<mask token>\n",
"step-3": "def long_alpha(str1):\n list1 = []\n list2 = ''\n maxi = 0\n j = 0\n for i in range(len(str1)):\n if i == 0:\n list2 += str1[i]\n elif ord(str1[i - 1]) <= ord(str1[i]):\n list2 += str1[i]\n else:\n list1.append(list2)\n list2 = ''\n list2 += str1[i]\n list1.append(list2)\n for i in range(len(list1)):\n if maxi < len(list1[i]):\n maxi = len(list1[i])\n j = i\n return list1[j]\n\n\n<mask token>\nprint(res)\n",
"step-4": "def long_alpha(str1):\n list1 = []\n list2 = ''\n maxi = 0\n j = 0\n for i in range(len(str1)):\n if i == 0:\n list2 += str1[i]\n elif ord(str1[i - 1]) <= ord(str1[i]):\n list2 += str1[i]\n else:\n list1.append(list2)\n list2 = ''\n list2 += str1[i]\n list1.append(list2)\n for i in range(len(list1)):\n if maxi < len(list1[i]):\n maxi = len(list1[i])\n j = i\n return list1[j]\n\n\nstr1 = 'abcaklmoeeffd'\nres = long_alpha(str1)\nprint(res)\n",
"step-5": "\r\ndef long_alpha(str1):\r\n list1 = []\r\n list2 = \"\"\r\n maxi = 0\r\n j = 0\r\n for i in range(len(str1)):\r\n if i == 0:\r\n list2 += str1[i]\r\n elif ord(str1[i - 1]) <= ord(str1[i]):\r\n list2 += str1[i]\r\n else:\r\n list1.append(list2)\r\n list2 = \"\"\r\n list2 += str1[i]\r\n list1.append(list2)\r\n\r\n for i in range(len(list1)):\r\n if maxi < len(list1[i]):\r\n maxi = len(list1[i])\r\n j = i\r\n return list1[j]\r\nstr1 = \"abcaklmoeeffd\"\r\nres = long_alpha(str1)\r\nprint(res)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
# 0x03000000 is sys.hexversion for Python 3.0.0, so this branch only runs on
# Python 2, where the compatibility shim `foo` is needed.
if sys.hexversion < 0x03000000:
    from .foo import foo
|
normal
|
{
"blob_id": "485729398b51bebd16f38800c6100289b7b0b347",
"index": 9023,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.hexversion < 50331648:\n from .foo import foo\n",
"step-3": "import sys\nif sys.hexversion < 50331648:\n from .foo import foo\n",
"step-4": "\nimport sys\n\nif sys.hexversion < 0x03000000:\n from .foo import foo\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# import draw as p
# if there is no __init__.py
# from draw.point import Point
from draw import Rectangle
from draw import Point
from draw import ShapeUtils
if __name__ == '__main__':
    pn1 = Point(9, 8)
    pn2 = Point(6, 4)
    # BUG FIX: the message previously interpolated pn1 twice ("{pn1} and {pn1}"),
    # so the second point was never shown; report both operands of the distance.
    print(f'dist: {pn1} and {pn2} = {ShapeUtils.distance(pn1, pn2)}')

    rc1 = Rectangle(40, 20, 120, 300)
    rc2 = Rectangle(30, 21, 350, 400)
    # BUG FIX: same duplication for rc1 ("{rc1} and {rc1}"); show both rectangles.
    print(f'dist: {rc1} and {rc2} = {ShapeUtils.distance(rc1, rc2)}')

    # compare() > 0 means the first shape orders after the second.
    if ShapeUtils.compare(pn1, pn2) > 0:
        print(f'{pn1} > {pn2}')
|
normal
|
{
"blob_id": "b984dc052201748a88fa51d25c3bd3c22404fa96",
"index": 6571,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pn1 = Point(9, 8)\n pn2 = Point(6, 4)\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')\n rc1 = Rectangle(40, 20, 120, 300)\n rc2 = Rectangle(30, 21, 350, 400)\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')\n if ShapeUtils.compare(pn1, pn2) > 0:\n print(f'{pn1} > {pn2}')\n",
"step-3": "from draw import Rectangle\nfrom draw import Point\nfrom draw import ShapeUtils\nif __name__ == '__main__':\n pn1 = Point(9, 8)\n pn2 = Point(6, 4)\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')\n rc1 = Rectangle(40, 20, 120, 300)\n rc2 = Rectangle(30, 21, 350, 400)\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')\n if ShapeUtils.compare(pn1, pn2) > 0:\n print(f'{pn1} > {pn2}')\n",
"step-4": "\n# import draw as p\n\n# ако няма __init__.py\n# from draw.point import Point \n\nfrom draw import Rectangle\nfrom draw import Point\nfrom draw import ShapeUtils\n\n\n\nif __name__ == '__main__':\n pn1 = Point(9,8)\n pn2 = Point(6,4)\n\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1,pn2)}')\n\n rc1 = Rectangle(40,20,120,300)\n rc2 = Rectangle(30,21,350,400)\n\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1,rc2)}')\n\n if ShapeUtils.compare(pn1,pn2) > 0:\n print(f'{pn1} > {pn2}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
def test_Kmeans(*data):
    """Fit a default KMeans model and print clustering quality metrics.

    data: an (x, labels_true) pair of feature matrix and ground-truth labels.
    """
    x,labels_true = data
    clst = cluster.KMeans()
    clst.fit(x)
    predicted_labels = clst.predict(x)
    # Adjusted Rand Index compares the predicted clustering against the
    # true labels (1.0 = perfect agreement).
    print("ARI: %s" % adjusted_rand_score(labels_true, predicted_labels))
    # inertia_: sum of squared distances of samples to their cluster centers.
    print("Sum center distance %s" % (clst.inertia_,))
def test_Kmeans_nclusters(*data):
    """
    Test how the KMeans clustering result varies with the n_clusters parameter.

    For each cluster count, record the ARI and the sum of distances of all
    samples to their cluster centers (inertia), then plot both curves.
    """
    x, labels_true = data
    nums = range(1, 50)
    ARIs = []
    Distances = []
    for num in nums:
        clst = cluster.KMeans(n_clusters = num)
        clst.fit(x)
        predicted_labels = clst.predict(x)
        ARIs.append(adjusted_rand_score(labels_true, predicted_labels))
        Distances.append(clst.inertia_)
    # Plot ARI (left) and inertia (right) against the number of clusters.
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(nums, ARIs, marker = "+")
    ax.set_xlabel("n_clusters")
    ax.set_ylabel("ARI")
    ax = fig.add_subplot(1, 2, 2)
    ax.plot(nums, Distances, marker = "o")
    ax.set_xlabel("n_cluster")
    ax.set_ylabel("intertia_")
    fig.suptitle("KMeans")
    plt.show()
def test_KMeans_n_init(*data):
    """
    Examine how the number of KMeans restarts (n_init) and the strategy for
    choosing initial center vectors (init) affect the clustering result,
    comparing "k-means++" against "random" initialization.
    """
    x, labels_true = data
    nums = range(1, 50)
    # Plot ARI (left) and inertia (right) for both init strategies.
    fig = plt.figure()

    ARIs_k = []
    Distances_k = []
    ARIs_r = []
    Distances_r = []
    for num in nums:
        clst = cluster.KMeans(n_init = num, init = "k-means++")
        clst.fit(x)
        predicted_labels = clst.predict(x)
        ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))
        Distances_k.append(clst.inertia_)

        clst = cluster.KMeans(n_init = num, init = "random")
        clst.fit(x)
        predicted_labels = clst.predict(x)
        ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))
        Distances_r.append(clst.inertia_)
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(nums, ARIs_k, marker = "+", label = "k-means++")
    ax.plot(nums, ARIs_r, marker = "+", label = "random")
    ax.set_xlabel("n_init")
    ax.set_ylabel("ARI")
    ax.set_ylim(0, 1)
    ax.legend(loc = "best")
    ax = fig.add_subplot(1, 2, 2)
    ax.plot(nums, Distances_k, marker = "o", label = "k-means++")
    ax.plot(nums, Distances_r, marker = "o", label = "random")
    ax.set_xlabel("n_init")
    ax.set_ylabel("inertia_")
    ax.legend(loc = "best")
    fig.suptitle("KMeans")
    plt.show()
|
normal
|
{
"blob_id": "bd419d0a197a5e5a99a370e45cdb53a276ac5507",
"index": 5633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_Kmeans(*data):\n x, labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))\n print('Sum center distance %s' % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters=num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker='+')\n ax.set_xlabel('n_clusters')\n ax.set_ylabel('ARI')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker='o')\n ax.set_xlabel('n_cluster')\n ax.set_ylabel('intertia_')\n fig.suptitle('KMeans')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_Kmeans(*data):\n x, labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))\n print('Sum center distance %s' % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters=num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker='+')\n ax.set_xlabel('n_clusters')\n ax.set_ylabel('ARI')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker='o')\n ax.set_xlabel('n_cluster')\n ax.set_ylabel('intertia_')\n fig.suptitle('KMeans')\n plt.show()\n\n\ndef test_KMeans_n_init(*data):\n \"\"\"\n 该函数考察KMeans算法运行的次数和选择的初始中心向量策略的影响\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n fig = plt.figure()\n ARIs_k = []\n Distances_k = []\n ARIs_r = []\n Distances_r = []\n for num in nums:\n clst = cluster.KMeans(n_init=num, init='k-means++')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_k.append(clst.inertia_)\n clst = cluster.KMeans(n_init=num, init='random')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_r.append(clst.inertia_)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs_k, marker='+', label='k-means++')\n ax.plot(nums, ARIs_r, marker='+', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('ARI')\n ax.set_ylim(0, 1)\n ax.legend(loc='best')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances_k, marker='o', label='k-means++')\n 
ax.plot(nums, Distances_r, marker='o', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('inertia_')\n ax.legend(loc='best')\n fig.suptitle('KMeans')\n plt.show()\n",
"step-4": "from sklearn import cluster\nfrom sklearn.metrics import adjusted_rand_score\nimport matplotlib.pyplot as plt\n\n\ndef test_Kmeans(*data):\n x, labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))\n print('Sum center distance %s' % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters=num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker='+')\n ax.set_xlabel('n_clusters')\n ax.set_ylabel('ARI')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker='o')\n ax.set_xlabel('n_cluster')\n ax.set_ylabel('intertia_')\n fig.suptitle('KMeans')\n plt.show()\n\n\ndef test_KMeans_n_init(*data):\n \"\"\"\n 该函数考察KMeans算法运行的次数和选择的初始中心向量策略的影响\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n fig = plt.figure()\n ARIs_k = []\n Distances_k = []\n ARIs_r = []\n Distances_r = []\n for num in nums:\n clst = cluster.KMeans(n_init=num, init='k-means++')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_k.append(clst.inertia_)\n clst = cluster.KMeans(n_init=num, init='random')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_r.append(clst.inertia_)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs_k, marker='+', label='k-means++')\n ax.plot(nums, ARIs_r, marker='+', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('ARI')\n ax.set_ylim(0, 1)\n ax.legend(loc='best')\n ax 
= fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances_k, marker='o', label='k-means++')\n ax.plot(nums, Distances_r, marker='o', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('inertia_')\n ax.legend(loc='best')\n fig.suptitle('KMeans')\n plt.show()\n",
"step-5": "from sklearn import cluster\nfrom sklearn.metrics import adjusted_rand_score\nimport matplotlib.pyplot as plt\n\ndef test_Kmeans(*data):\n x,labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print(\"ARI: %s\" % adjusted_rand_score(labels_true, predicted_labels))\n print(\"Sum center distance %s\" % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters = num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n # 绘图\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker = \"+\")\n ax.set_xlabel(\"n_clusters\")\n ax.set_ylabel(\"ARI\")\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker = \"o\")\n ax.set_xlabel(\"n_cluster\")\n ax.set_ylabel(\"intertia_\")\n fig.suptitle(\"KMeans\")\n plt.show()\n\n\ndef test_KMeans_n_init(*data):\n \"\"\"\n 该函数考察KMeans算法运行的次数和选择的初始中心向量策略的影响\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n # 绘图\n fig = plt.figure()\n\n ARIs_k = []\n Distances_k = []\n ARIs_r = []\n Distances_r = []\n for num in nums:\n clst = cluster.KMeans(n_init = num, init = \"k-means++\")\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_k.append(clst.inertia_)\n \n clst = cluster.KMeans(n_init = num, init = \"random\")\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_r.append(clst.inertia_)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs_k, marker = \"+\", label = \"k-means++\")\n ax.plot(nums, ARIs_r, marker = \"+\", label = \"random\")\n ax.set_xlabel(\"n_init\")\n 
ax.set_ylabel(\"ARI\")\n ax.set_ylim(0, 1)\n ax.legend(loc = \"best\")\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances_k, marker = \"o\", label = \"k-means++\")\n ax.plot(nums, Distances_r, marker = \"o\", label = \"random\")\n ax.set_xlabel(\"n_init\")\n ax.set_ylabel(\"inertia_\")\n ax.legend(loc = \"best\")\n fig.suptitle(\"KMeans\")\n plt.show()\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from aspose.email.storage.pst import *
from aspose.email.mapi import MapiCalendar
from aspose.email.mapi import MapiRecipientType
from aspose.email.mapi import MapiRecipientCollection
from aspose.email.mapi import MapiRecipient
import datetime as dt
from datetime import timedelta
import os
def run():
    """Walk every subfolder of a PST file and, for each message it contains,
    look up and print the display name of the message's parent folder."""
    data_dir = "Data/"
    #ExStart: RetrievingParentFolderInformationFromMessageInfo
    pst = PersonalStorage.from_file(data_dir + "Outlook.pst")
    for sub_folder in pst.root_folder.get_sub_folders():
        for message_info in sub_folder.enumerate_messages():
            # Resolve the containing folder from the message's entry id.
            parent_folder = pst.get_parent_folder(message_info.entry_id)
            print(parent_folder.display_name)
    #ExEnd: RetrievingParentFolderInformationFromMessageInfo
if __name__ == '__main__':
    run()
|
normal
|
{
"blob_id": "8a6028aa477f697946ab75411b667f559e87141c",
"index": 7072,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run():\n dataDir = 'Data/'\n personalStorage = PersonalStorage.from_file(dataDir + 'Outlook.pst')\n for folder in personalStorage.root_folder.get_sub_folders():\n for messageInfo in folder.enumerate_messages():\n folderInfo = personalStorage.get_parent_folder(messageInfo.entry_id\n )\n print(folderInfo.display_name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run():\n dataDir = 'Data/'\n personalStorage = PersonalStorage.from_file(dataDir + 'Outlook.pst')\n for folder in personalStorage.root_folder.get_sub_folders():\n for messageInfo in folder.enumerate_messages():\n folderInfo = personalStorage.get_parent_folder(messageInfo.entry_id\n )\n print(folderInfo.display_name)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "from aspose.email.storage.pst import *\nfrom aspose.email.mapi import MapiCalendar\nfrom aspose.email.mapi import MapiRecipientType\nfrom aspose.email.mapi import MapiRecipientCollection\nfrom aspose.email.mapi import MapiRecipient\nimport datetime as dt\nfrom datetime import timedelta\nimport os\n\n\ndef run():\n dataDir = 'Data/'\n personalStorage = PersonalStorage.from_file(dataDir + 'Outlook.pst')\n for folder in personalStorage.root_folder.get_sub_folders():\n for messageInfo in folder.enumerate_messages():\n folderInfo = personalStorage.get_parent_folder(messageInfo.entry_id\n )\n print(folderInfo.display_name)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "from aspose.email.storage.pst import *\nfrom aspose.email.mapi import MapiCalendar\nfrom aspose.email.mapi import MapiRecipientType\nfrom aspose.email.mapi import MapiRecipientCollection\nfrom aspose.email.mapi import MapiRecipient\n\nimport datetime as dt\nfrom datetime import timedelta\n\nimport os\n\ndef run():\n\tdataDir = \"Data/\"\n\t#ExStart: RetrievingParentFolderInformationFromMessageInfo\n\tpersonalStorage = PersonalStorage.from_file(dataDir + \"Outlook.pst\")\n\n\tfor folder in personalStorage.root_folder.get_sub_folders():\n\n\t\tfor messageInfo in folder.enumerate_messages():\n\n\t\t\tfolderInfo = personalStorage.get_parent_folder(messageInfo.entry_id)\n\n\t\t\tprint(folderInfo.display_name)\n\t#ExEnd: RetrievingParentFolderInformationFromMessageInfo\n\t\nif __name__ == '__main__':\n run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.1.7 on 2019-04-01 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax ``MainSubmission.execution_time``: re-declare the field as an
    optional DecimalField (blank/null allowed, default 0, up to 6 digits
    with 3 decimal places).

    Auto-generated by Django 2.1.7 on 2019-04-01; avoid hand-editing the
    operations — generate a follow-up migration instead.
    """
    # Must apply after the previous submissions migration.
    dependencies = [
        ('submissions', '0004_auto_20190401_1834'),
    ]
    operations = [
        migrations.AlterField(
            model_name='mainsubmission',
            name='execution_time',
            field=models.DecimalField(blank=True, decimal_places=3, default=0, max_digits=6, null=True),
        ),
    ]
|
normal
|
{
"blob_id": "3fed8723d215bce3cf391752e07ca85b2d6701a3",
"index": 3410,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('submissions', '0004_auto_20190401_1834')]\n operations = [migrations.AlterField(model_name='mainsubmission', name=\n 'execution_time', field=models.DecimalField(blank=True,\n decimal_places=3, default=0, max_digits=6, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('submissions', '0004_auto_20190401_1834')]\n operations = [migrations.AlterField(model_name='mainsubmission', name=\n 'execution_time', field=models.DecimalField(blank=True,\n decimal_places=3, default=0, max_digits=6, null=True))]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-04-01 14:37\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('submissions', '0004_auto_20190401_1834'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='mainsubmission',\n name='execution_time',\n field=models.DecimalField(blank=True, decimal_places=3, default=0, max_digits=6, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
"""Generate input files (change1.in .. change10.in) for the "change" problem.

Each case lists, for a bound n, every denomination pair (a, b) with b < a
and a <= n, preceded by the pair count.

Ported to Python 3: the original used the removed ``print`` statement and
``file()`` builtin, so it no longer ran on a modern interpreter.  File
writing now lives under a ``__main__`` guard so importing this module has
no side effects, and the output file is closed even on error.
"""
from random import *  # kept from the original; no random values are currently used

prob = "change"

# Upper bound n for each generated case; the 1-based index becomes the
# file-name suffix (change1.in, change2.in, ...).
cases = [10, 10, 10, 100, 100, 100000, 100000, 100000, 100000, 100000]

# Available coin denominations.
st = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]


def make_case(n):
    """Return the test-case text for bound *n*.

    First line is the number of pairs; then one "a b" line per pair
    (a, b) with b < a and a <= n, in denomination order (outer loop a,
    inner loop b), matching the original generator's output exactly.
    """
    pairs = [(a, b) for a in st for b in st if b < a and a <= n]
    lines = ["%d" % len(pairs)]
    lines += ["%d %d" % (a, b) for (a, b) in pairs]
    return "\n".join(lines) + "\n"


if __name__ == "__main__":
    for cur, n in enumerate(cases, start=1):
        print("make %d..." % cur)
        # `with` replaces the removed `file()` builtin and closes on error.
        with open(prob + str(cur) + ".in", "w") as f:
            f.write(make_case(n))
|
normal
|
{
"blob_id": "2cef5311a9ff9497ad6611fe7b47e4f7c5b1b3c7",
"index": 7581,
"step-1": "#!/usr/bin/python\n\nfrom random import *\n\nprob = \"change\"\n\ncases = [ \n 10,\n 10,\n 10,\n 100,\n 100,\n 100000,\n 100000,\n 100000,\n 100000,\n 100000\n ]\ncur = 0\n\nst = [1,2,5,10,20,50,100,200,500,1000,2000,5000,10000]\n\nfor (n) in cases :\n cout = \"\"\n cur += 1\n print \"make %d...\" % cur\n##-----\n#TODO generate the data\n\n tot = 0\n stt = []\n for a in st :\n for b in st :\n if b < a and a <= n :\n tot = tot + 1\n stt.append( (a,b) )\n cout += \"%d\\n\" % tot\n for (a,b) in stt :\n cout += \"%d %d\\n\" % (a, b)\n\n\n##-----\n f = file( prob + str(cur) + \".in\", \"w\" )\n f.write( cout )\n f.close()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/bin/env python
# coding: utf-8
"""
Dakara Online protocol generator, by Alejandro Santos
"""
from genpackets import *
from gendefs_js import *
# Cross-family accumulators: filled by write_packets_from() for each packet
# family and drained by write_packets() when assembling the final JS output.
BUILDERS = []         # JS builder-method source, one entry per packet
HANDLERS = []         # handler snippets (only filled for ServerPacket)
DECODE_DISPATCH = []  # per-packet bodies for the decode/dispatch switch
ARGS_HANDLER = []     # handler argument lists for the stub file
def write_packets_from(f, fph, base_name, namespace, P):
    """Emit the JavaScript for one packet family.

    Writes the packet-ID enum (non-server families) and per-packet headers
    to *f*, handler stubs to *fph*, and appends builder / handler / dispatch
    snippets to the module-level accumulators for write_packets() to drain.

    f         -- protocol.js output stream
    fph       -- protocolhandlerAux.js output stream (handler stubs only)
    base_name -- "ClientPacket", "ClientGMPacket" or "ServerPacket"
    namespace -- family tag; currently unused in this body
    P         -- sequence of packet descriptors; a falsy entry is skipped,
                 and a descriptor's index in the sequence is its packet ID
    """
    # Enum with IDs
    if base_name != "ServerPacket" :
        f.write("""var {base_name}ID = {{ \n""".format(base_name=base_name))
        for i, x in enumerate(P):
            if x:
                f.write("    {name} : {packet_id}".format(base_name=base_name, name=x.name, packet_id=i))
                f.write(",\n")
        f.write("""    {base_name}ID_PACKET_COUNT : {packet_id}\n}};\n""".format(base_name=base_name, packet_id=len(P)))
    # Factory
    # NOTE: the factory generator below is intentionally disabled -- it is
    # kept as an unassigned triple-quoted string (a no-op expression).
    '''
    f.write("""
function {base_name}Factory(buffer) {{
    if (buffer.length() < 1) return 0;
    var p;
    PacketID = buffer.PeekByte();
    switch (PacketID) {{
""".format(base_name=base_name))
    for i, x in enumerate(P):
        if not x: continue
        f.write("""
        case {i}:
            p = new {name}(buffer);
            break;
""".format(i=i, name=x.name))
    f.write("""
    }}
    return p;
}}
""".format())
    '''
    # One pass per packet: collect per-field fragments, then render templates.
    for i, x in enumerate(P):
        if not x: continue
        header_fields = []
        header_fields_signature = []
        items_assign_e = []
        items_assign_build = []
        ctor_fields = ""
        min_byte_count = 0
        ctor_fields_bytequeue = ""
        parametros_fields = ""
        parametros_args = ""
        serialize_fields = ""
        if x.name == "MultiMessage":
            # MultiMessage is too irregular to generate; use the hand-written body.
            escribir_multimessage(f)
            continue
        # Each descriptor arg appears to be (name, type | flags, [array_size]) -- the
        # low byte is the element type and TYPE_ARRAY marks array fields (per gendefs_js).
        for y in x.args:
            arg_name = y[0]
            arg_type = y[1] & 0xff
            arg_type_str = TYPE_TO_STR[arg_type]
            arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]
            arg_is_array = ((y[1] & TYPE_ARRAY) == TYPE_ARRAY)
            type_reader_name = TYPE_TO_READER_NAME[arg_type]
            type_writer_name = TYPE_TO_WRITER_NAME[arg_type]
            ctor_fields += ", " + arg_name + "()"
            items_assign_e.append("            {arg_name}: {arg_name},".format(arg_name=arg_name))
            items_assign_build.append("        e.{arg_name}= {arg_name};".format(arg_name=arg_name))
            if arg_is_array:
                array_size=y[2]
                min_byte_count += TYPE_SIZE[arg_type] * array_size
                header_fields.append("    {arg_name}; ".format(arg_type_str=arg_type_str, arg_name=arg_name, array_size=array_size))
                header_fields_signature.append("{arg_name} ".format(arg_type_str=arg_type_sig_str, arg_name=arg_name, array_size=array_size))
                ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)
                parametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)
                parametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)
                serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name, array_size=array_size)
            else:
                min_byte_count += TYPE_SIZE[arg_type]
                header_fields.append("    {arg_type_str} {arg_name}; ".format(arg_type_str=arg_type_str, arg_name=arg_name))
                header_fields_signature.append("{arg_type_str} {arg_name}".format(arg_type_str=arg_type_sig_str, arg_name=arg_name))
                ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)
                parametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)
                parametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)
                serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name)
        # Substitution values shared by every template of this packet.
        format_args = {
            'base_name': base_name,
            'name': x.name,
            'header_fields': '\n'.join(header_fields),
            'header_fields_signature': ', '.join(header_fields_signature),
            'items_assign_e': '\n'.join(items_assign_e),
            'items_assign_build': '\n'.join(items_assign_build),
            'ctor_fields': ctor_fields,
            'packet_id': i,
            'min_byte_count': min_byte_count,
            'ctor_fields_bytequeue': ctor_fields_bytequeue,
            'serialize_fields': serialize_fields,
            'parametros_fields' : parametros_fields,
            'parametros_args' : parametros_args
        }
        # Individual packet header
        if base_name != "ServerPacket" :
            f.write(x.get_header_fmt().format(**format_args))
        BUILDERS.append(x.get_builder_fmt().format(**format_args))
        if base_name == "ServerPacket" :
            HANDLERS.append(x.get_handler_fmt().format(**format_args))
        # For ServerPacketDecodeAndDispatch (dispatch without creating packet objects)
        if base_name == "ServerPacket" :
            dec_dispatch = x.get_parametros_fmt().format(**format_args);
            # strip the trailing comma, if any:
            pos = dec_dispatch.rfind(",")
            if pos > 0:
                dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos+1:]
            DECODE_DISPATCH.append(dec_dispatch)
        if base_name == "ServerPacket" :
            args_handler = x.get_argumentosHandler_fmt().format(**format_args);
            # strip the trailing comma, if any:
            pos = args_handler.rfind(",")
            if pos > 0:
                args_handler = args_handler[:pos] + args_handler[pos+1:]
            # strip the trailing newline
            pos = args_handler.rfind("\n")
            args_handler = args_handler[:pos] + args_handler[pos+1:]
            ARGS_HANDLER.append(args_handler)
    # Decode and Dispatch, keeping the Packet in the stack
    # Suggested by hmk
    if base_name == "ServerPacket" :
        f.write("""
function {base_name}DecodeAndDispatch(buffer, handler) {{
    if (buffer.length() < 1) return;
    var PacketID = buffer.ReadByte();
    switch (PacketID) {{
""".format(base_name=base_name))
        for i, x in enumerate(P):
            if not x: continue
            f.write("""
        case {i}:
            {{
                {decode_dispatch}
                break;
            }}
""".format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))
        f.write("""
        default:
            {{
                msg = "error decoding packet id: " + PacketID;
                throw new Error(msg);
            }}
    }}
}}
""".format())
    fph.write("""
/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/
""".format(base_name=base_name))
    # NOTE(review): ARGS_HANDLER is only populated for "ServerPacket" above,
    # yet this loop pops one entry per packet for every family -- confirm the
    # client/GM calls cannot reach it with an empty list.
    for i, x in enumerate(P):
        if not x: continue
        fph.write("""\n\thandle{name}: function ({arg_handler}){{ \n""".format(base_name=base_name, name=x.name, arg_handler = ARGS_HANDLER.pop(0)))
        #fph.write(HANDLERS.pop(0))
        fph.write("""\t\tlog.network("TODO: handle{name} ");\n\t}},\n""".format(base_name=base_name, name=x.name))
    # This second loop's body is entirely commented out; only the `continue`
    # guard remains executable.
    for i, x in enumerate(P):
        if not x: continue
        #fph.write("""\n\thandle{name}: function (p){{ \n""".format(base_name=base_name, name=x.name))
        #fph.write(HANDLERS.pop(0))
        #fph.write("""\t\talert("TODO: handle{name} ");\n\t}},\n""".format(base_name=base_name, name=x.name))
    fph.write("""
/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/
""")
def write_packets():
    """Regenerate protocol.js plus the handler-stub helper protocolhandlerAux.js.

    Runs the three packet families through write_packets_from() (which fills
    the module-level accumulators), then wraps every collected builder method
    in a `Protocolo` class inside an AMD `define(['enums'], ...)` module.
    """
    # `with` guarantees both output files are flushed and closed even if a
    # packet descriptor raises halfway through generation (the original
    # open()/close() pair leaked the handles on error).
    with open("protocol.js", "w") as f, open("protocolhandlerAux.js", "w") as fph:
        f.write("""
/* Automatically generated file */
define(['enums'], function (Enums) {
""")
        write_packets_from(f,fph, "ClientPacket", "client", CLIENT_PACKETS)
        write_packets_from(f,fph, "ClientGMPacket", "clientgm", CLIENT_GM_PACKETS)
        write_packets_from(f,fph, "ServerPacket", "server", SERVER_PACKETS)
        # Multimessage dispatch is hard-coded (see escribir_multimessage). TODO: generate it properly.
        f.write("""
class Protocolo{
""")
        for builder in BUILDERS:
            f.write(builder)
        f.write("""
    ServerPacketDecodeAndDispatch(buffer, handler){
        ServerPacketDecodeAndDispatch(buffer, handler);
    }
""")
        f.write("""
}
return Protocolo;
}); """)
def escribir_multimessage(f):
    """Append the hand-maintained MultiMessage dispatch body.

    MultiMessage multiplexes many small server notifications behind a single
    packet ID, so its inner switch (on the message-index byte) is written by
    hand here instead of being generated from a descriptor.  The snippet and
    its handler-argument list are pushed onto DECODE_DISPATCH / ARGS_HANDLER
    exactly like the generated packets.  Note: *f* is accepted for call-site
    symmetry with the generated path, but nothing is written to it here.
    """
    DECODE_DISPATCH.append('''
            var msgIdx = buffer.ReadByte();
            switch (msgIdx) {
            case Enums.eMessage.NPCHitUser:
                handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());
                break;
            case Enums.eMessage.UserHitNPC:
                handler.handleUserHitNPC(buffer.ReadLong());
                break;
            case Enums.eMessage.UserAttackedSwing:
                handler.handleUserAttackedSwing(buffer.ReadInteger());
                break;
            case Enums.eMessage.UserHittedByUser:
                handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());
                break;
            case Enums.eMessage.UserHittedUser:
                handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());
                break;
            case Enums.eMessage.WorkRequestTarget:
                handler.handleWorkRequestTarget(buffer.ReadByte());
                break;
            case Enums.eMessage.HaveKilledUser:
                handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());
                break;
            case Enums.eMessage.UserKill:
                handler.handleUserKill(buffer.ReadInteger());
                break;
            case Enums.eMessage.Home:
                handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());
                break;
            case Enums.eMessage.DontSeeAnything:
                handler.handleDontSeeAnything();
                break;
            case Enums.eMessage.NPCSwing:
                handler.handleNPCSwing();
                break;
            case Enums.eMessage.NPCKillUser:
                handler.handleNPCKillUser();
                break;
            case Enums.eMessage.BlockedWithShieldUser:
                handler.handleBlockedWithShieldUser();
                break;
            case Enums.eMessage.BlockedWithShieldOther:
                handler.handleBlockedWithShieldOther();
                break;
            case Enums.eMessage.UserSwing:
                handler.handleUserSwing();
                break;
            case Enums.eMessage.SafeModeOn:
                handler.handleSafeModeOn();
                break;
            case Enums.eMessage.SafeModeOff:
                handler.handleSafeModeOff();
                break;
            case Enums.eMessage.ResuscitationSafeOff:
                handler.handleResuscitationSafeOff();
                break;
            case Enums.eMessage.ResuscitationSafeOn:
                handler.handleResuscitationSafeOn();
                break;
            case Enums.eMessage.NobilityLost:
                handler.handleNobilityLost();
                break;
            case Enums.eMessage.CantUseWhileMeditating:
                handler.handleCantUseWhileMeditating();
                break;
            case Enums.eMessage.EarnExp:
                handler.handleEarnExp();
                break;
            case Enums.eMessage.FinishHome:
                handler.handleFinishHome();
                break;
            case Enums.eMessage.CancelHome:
                handler.handleCancelHome();
                break;
            default:
                throw new Error("Multimessage: " + msgIdx + " no reconocido por el protocolo");
            }
    ''')
    ARGS_HANDLER.append("msgIdx,args")
def main():
    """Script entry point: run the protocol-file generator."""
    write_packets()
# Run the generator only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "22dccf6bb76dab735f373089d0772f475b2d5a5d",
"index": 6849,
"step-1": "<mask token>\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n 
header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = {'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': 
serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n 
ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n 
handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n 
header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = {'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': 
serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n 
ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n 
handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nBUILDERS = []\nHANDLERS = []\nDECODE_DISPATCH = []\nARGS_HANDLER = []\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, 
arg_name=arg_name, array_size=array_size))\n header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = {'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': 
ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n 
format(base_name=base_name, name=x.name, arg_handler=\n ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case 
Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom genpackets import *\nfrom gendefs_js import *\nBUILDERS = []\nHANDLERS = []\nDECODE_DISPATCH = []\nARGS_HANDLER = []\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' 
{arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = {'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n 
min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function 
({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), 
buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/bin/env python\n# coding: utf-8\n\n\"\"\"\nDakara Online protocol generator, by Alejandro Santos\n\"\"\"\n\nfrom genpackets import *\nfrom gendefs_js import *\n\nBUILDERS = []\nHANDLERS = []\nDECODE_DISPATCH = []\nARGS_HANDLER = []\ndef write_packets_from(f, fph, base_name, namespace, P):\n\n\n # Enum with IDs\n if base_name != \"ServerPacket\" :\n \tf.write(\"\"\"var {base_name}ID = {{ \\n\"\"\".format(base_name=base_name))\n \tfor i, x in enumerate(P):\n \t\tif x:\n \t\t\tf.write(\" {name} : {packet_id}\".format(base_name=base_name, name=x.name, packet_id=i))\n \t\t\tf.write(\",\\n\")\n \tf.write(\"\"\" {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n\"\"\".format(base_name=base_name, packet_id=len(P)))\n\n# Factory\n '''\n f.write(\"\"\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\".format(i=i, name=x.name))\n\n f.write(\"\"\"\n }}\n return p;\n}}\n\"\"\".format())\n '''\n \n for i, x in enumerate(P):\n if not x: continue\n\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = \"\"\n min_byte_count = 0\n ctor_fields_bytequeue = \"\"\n parametros_fields = \"\"\n parametros_args = \"\"\n serialize_fields = \"\"\n\n if x.name == \"MultiMessage\":\n escribir_multimessage(f)\n continue\n\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 0xff\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = ((y[1] & TYPE_ARRAY) == TYPE_ARRAY)\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n\n ctor_fields += \", \" + arg_name + \"()\"\n\n items_assign_e.append(\" {arg_name}: {arg_name},\".format(arg_name=arg_name))\n 
items_assign_build.append(\" e.{arg_name}= {arg_name};\".format(arg_name=arg_name))\n\n if arg_is_array:\n array_size=y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(\" {arg_name}; \".format(arg_type_str=arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append(\"{arg_name} \".format(arg_type_str=arg_type_sig_str, arg_name=arg_name, array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)\n \tparametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)\n \tparametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(\" {arg_type_str} {arg_name}; \".format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append(\"{arg_type_str} {arg_name}\".format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name)\n\n format_args = {\n 'base_name': base_name,\n 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 
'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields,\n 'packet_id': i,\n 'min_byte_count': min_byte_count,\n 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields,\n 'parametros_fields' : parametros_fields,\n 'parametros_args' : parametros_args\n }\n\n # Individual packet header\n if base_name != \"ServerPacket\" :\n \tf.write(x.get_header_fmt().format(**format_args))\n \tBUILDERS.append(x.get_builder_fmt().format(**format_args))\n\n if base_name == \"ServerPacket\" :\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n\n #para el serverpacketdecodeanddispatch (sin tener que crear packetes)\n if base_name == \"ServerPacket\" :\n \tdec_dispatch = x.get_parametros_fmt().format(**format_args);\n \t#le saco la ultima coma si es que tiene:\n \tpos = dec_dispatch.rfind(\",\")\n \tif pos > 0:\n \t\tdec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos+1:]\n \tDECODE_DISPATCH.append(dec_dispatch)\n\n if base_name == \"ServerPacket\" :\n args_handler = x.get_argumentosHandler_fmt().format(**format_args);\n #le saco la ultima coma si es que tiene:\n pos = args_handler.rfind(\",\")\n if pos > 0:\n \targs_handler = args_handler[:pos] + args_handler[pos+1:]\n #le saco fin de linea\n pos = args_handler.rfind(\"\\n\")\n args_handler = args_handler[:pos] + args_handler[pos+1:]\n ARGS_HANDLER.append(args_handler)\n\n\n\n\n\n \n # Decode and Dispatch, keeping the Packet in the stack\n # Suggested by hmk\n if base_name == \"ServerPacket\" :\n f.write(\"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\".format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n\n f.write(\"\"\"\n default:\n {{\n msg = 
\"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\".format())\n\n fph.write(\"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\".format(base_name=base_name))\n for i, x in enumerate(P):\n if not x: continue\n fph.write(\"\"\"\\n\\thandle{name}: function ({arg_handler}){{ \\n\"\"\".format(base_name=base_name, name=x.name, arg_handler = ARGS_HANDLER.pop(0)))\n #fph.write(HANDLERS.pop(0))\n fph.write(\"\"\"\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n\"\"\".format(base_name=base_name, name=x.name))\n\n for i, x in enumerate(P):\n if not x: continue\n #fph.write(\"\"\"\\n\\thandle{name}: function (p){{ \\n\"\"\".format(base_name=base_name, name=x.name))\n #fph.write(HANDLERS.pop(0))\n #fph.write(\"\"\"\\t\\talert(\"TODO: handle{name} \");\\n\\t}},\\n\"\"\".format(base_name=base_name, name=x.name))\n\n fph.write(\"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\")\n\n\ndef write_packets():\n f = open(\"protocol.js\", \"w\")\n fph = open(\"protocolhandlerAux.js\", \"w\")\n\n f.write(\"\"\"\n/* Automatically generated file */\n\ndefine(['enums'], function (Enums) {\n\"\"\")\n\n write_packets_from(f,fph, \"ClientPacket\", \"client\", CLIENT_PACKETS)\n write_packets_from(f,fph, \"ClientGMPacket\", \"clientgm\", CLIENT_GM_PACKETS)\n write_packets_from(f,fph, \"ServerPacket\", \"server\", SERVER_PACKETS)\n\n #Multimessages hardcodeado: // TODO ; hacerlo bien\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n\n f.write(\"\"\"\n ServerPacketDecodeAndDispatch(buffer, 
handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\")\n f.write(\"\"\"\n }\n\n return Protocolo;\n}); \"\"\")\n\n\n\n\n\n f.close()\n fph.close()\n\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append('''\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n 
handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n''')\n ARGS_HANDLER.append(\"msgIdx,args\")\n\n\ndef main():\n write_packets()\n\nif __name__ == '__main__':\n main()",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
def fahrenheit_to_celsius(fahrenheit):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius.

    Args:
        fahrenheit: temperature in degrees Fahrenheit (any real number).

    Returns:
        The equivalent temperature in degrees Celsius.
    """
    return (fahrenheit - 32) * 5 / 9


def main():
    """Prompt for a Fahrenheit reading and print its Celsius equivalent."""
    fahrenheit = float(input("Enter a fahrenheit degree: "))
    celcius = fahrenheit_to_celsius(fahrenheit)
    print("From fahrenheit to celcius", celcius)


# Guard the interactive prompt so importing this module has no side effects.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "2f2030107f3a23c0d2f404a838eaccc8b35ac410",
"index": 1086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('From fahrenheit to celcius', celcius)\n",
"step-3": "fahrenheit = float(input('Enter a fahrenheit degree: '))\ncelcius = (fahrenheit - 32) * 5 / 9\nprint('From fahrenheit to celcius', celcius)\n",
"step-4": "fahrenheit = float(input(\"Enter a fahrenheit degree: \"))\ncelcius = ((fahrenheit - 32) * 5) / 9\nprint(\"From fahrenheit to celcius\", celcius)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# This version assumes the domains correspond to the train/test sets.
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
    """Row-wise softmax of a 2-D array, with max-subtraction for stability."""
    shifted = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
    return shifted / shifted.sum(axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
    """Multinomial Adversarial Network (MAN) for unsupervised domain adaptation.

    Sub-networks (built lazily in :meth:`fit`):
      * ``F_s`` -- shared feature extractor, common to all domains.
      * ``F_d`` -- dict of private, per-domain feature extractors.
      * ``C``   -- label classifier fed the concatenated shared+private features.
      * ``D``   -- domain discriminator trained adversarially against ``F_s``.

    NOTE(review): this implementation assumes the labeled domain is the train
    set and the unlabeled domain is the test set (see ``prepare_data``).
    """

    def __init__(self, k, m, model_params=None, log_params=None):
        super().__init__(k, m, model_params, log_params)

    def prepare_data(self, d):
        """Build shuffled DataLoaders (and fresh iterators) for every domain.

        Assume d is a dictionary of datasets where d[domain] is a dataset
        object with sparse features ``X`` and labels ``y``. Assume the labeled
        domain = train set and the unlabeled domain = test set.

        Returns:
            Tuple ``(train_loaders, train_iters, unlabeled_loaders,
            unlabeled_iters)``, each a dict keyed by domain name.
        """
        train_loaders, train_iters = {}, {}
        unlabeled_loaders, unlabeled_iters = {}, {}
        for domain in opt.domains:
            # Densify and CONVERT TO FLOAT32: the extractor models fail on
            # float64 inputs (see module docstring).
            features = torch.from_numpy(d[domain].X.todense().astype('float32'))
            target = torch.from_numpy(d[domain].y)
            train = data_utils.TensorDataset(features, target)
            train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle=True)
            train_iters[domain] = iter(train_loaders[domain])
        for domain in opt.unlabeled_domains:
            features = torch.from_numpy(d[domain].X.todense().astype('float32'))
            target = torch.from_numpy(d[domain].y)
            uset = data_utils.TensorDataset(features, target)
            unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size, shuffle=True)
            unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
        return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters

    def fit(self, d, *args, **kwargs):
        """Train the MAN model on the datasets in ``d``.

        Alternates discriminator (D) updates with feature-extractor/classifier
        (F&C) updates, following the MAN training scheme.

        Args:
            d: dict of datasets keyed by domain name (see ``prepare_data``).
            **kwargs: must contain ``display_step`` -- progress is printed
                every ``display_step`` epochs.

        Returns:
            Tuple ``(loss_d_res, l_d_res, l_c_res)`` -- per-epoch histories of
            the discriminator loss on the unlabeled domain, the last adversarial
            loss, and the last classification loss.
        """
        # TODO: make this able to fit consecutively.
        train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = \
            self.prepare_data(d)
        # Build the four sub-networks.
        self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,
                                       opt.shared_hidden_size, opt.dropout)
        self.F_d = {}
        for domain in opt.domains:
            self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],
                                                   opt.F_hidden_sizes,
                                                   opt.domain_hidden_size,
                                                   opt.dropout)
        # C's output size HAS TO BE 2 even for binary classification
        # (nll_loss expects one log-probability per class).
        self.C = SentimentClassifier(opt.C_layers,
                                     opt.shared_hidden_size + opt.domain_hidden_size,
                                     opt.shared_hidden_size + opt.domain_hidden_size,
                                     2, opt.dropout, opt.C_bn)
        self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size,
                                  opt.shared_hidden_size, len(opt.all_domains),
                                  opt.loss, opt.dropout, opt.D_bn)
        self.F_s, self.C, self.D = (self.F_s.to(opt.device),
                                    self.C.to(opt.device),
                                    self.D.to(opt.device))
        for f_d in self.F_d.values():
            # nn.Module.to() moves parameters in place, so calling it on the
            # loop variable is enough to move the dict entries.
            f_d.to(opt.device)
        # Optimizers: one for F_s/F_d/C jointly, one for D.
        optimizer = optim.Adam(itertools.chain(*map(list,
            [self.F_s.parameters() if self.F_s else [], self.C.parameters()] +
            [f.parameters() for f in self.F_d.values()])), lr=0.0001)
        optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
        loss_d_res = []
        l_d_res = []
        l_c_res = []
        for epoch in range(opt.max_epoch):
            self.F_s.train()
            self.C.train()
            self.D.train()
            for f in self.F_d.values():
                f.train()
            # Training accuracy per domain.
            correct, total = defaultdict(int), defaultdict(int)
            # D accuracy.
            d_correct, d_total = 0, 0
            # Conceptually view 1 epoch as 1 epoch of the first domain.
            num_iter = len(train_loaders[opt.domains[0]])
            for i in range(num_iter):
                # --- D iterations: train the discriminator only. ---
                utils.freeze_net(self.F_s)
                # BUGFIX: map() is lazy in Python 3, so the original
                # `map(utils.freeze_net, self.F_d.values())` never executed.
                for f_d in self.F_d.values():
                    utils.freeze_net(f_d)
                utils.freeze_net(self.C)
                utils.unfreeze_net(self.D)
                # Optional WGAN n_critic trick.
                n_critic = opt.n_critic
                for _ in range(n_critic):
                    self.D.zero_grad()
                    loss_d = {}
                    # Train on both labeled and unlabeled domains.
                    for domain in opt.unlabeled_domains:
                        # Targets not used here; D predicts the domain.
                        d_inputs, _ = utils.endless_get_next_batch(
                            unlabeled_loaders, unlabeled_iters, domain)
                        d_inputs = d_inputs.to(opt.device)
                        d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
                        shared_feat = self.F_s(d_inputs)
                        d_outputs = self.D(shared_feat)
                        # D accuracy bookkeeping.
                        _, pred = torch.max(d_outputs, 1)
                        d_total += len(d_inputs)
                        if opt.loss.lower() == 'l2':
                            _, tgt_indices = torch.max(d_targets, 1)
                            d_correct += (pred == tgt_indices).sum().item()
                            l_d = functional.mse_loss(d_outputs, d_targets)
                            l_d.backward()
                        else:
                            d_correct += (pred == d_targets).sum().item()
                            l_d = functional.nll_loss(d_outputs, d_targets)
                            l_d.backward()
                        loss_d[domain] = l_d.item()
                    optimizerD.step()
                # --- F&C iteration: train extractors + classifier, D frozen. ---
                utils.unfreeze_net(self.F_s)
                # BUGFIX: same lazy-map issue as above; unfreeze explicitly.
                for f_d in self.F_d.values():
                    utils.unfreeze_net(f_d)
                utils.unfreeze_net(self.C)
                utils.freeze_net(self.D)
                #if opt.fix_emb:
                #    utils.freeze_net(self.F_s.word_emb)
                #    map(utils.freeze_net, self.F_d.values())
                self.F_s.zero_grad()
                for f_d in self.F_d.values():
                    f_d.zero_grad()
                self.C.zero_grad()
                shared_feats, domain_feats = [], []
                for domain in opt.domains:
                    inputs, targets = utils.endless_get_next_batch(
                        train_loaders, train_iters, domain)
                    # targets arrive as int64, as required by nll_loss.
                    targets = targets.to(opt.device)
                    inputs = inputs.to(opt.device)
                    shared_feat = self.F_s(inputs)
                    shared_feats.append(shared_feat)
                    domain_feat = self.F_d[domain](inputs)
                    domain_feats.append(domain_feat)
                    # Classifier consumes shared + private features.
                    features = torch.cat((shared_feat, domain_feat), dim=1)
                    c_outputs = self.C(features)
                    l_c = functional.nll_loss(c_outputs, targets)
                    l_c.backward(retain_graph=True)
                    # Training accuracy bookkeeping.
                    _, pred = torch.max(c_outputs, 1)
                    total[domain] += targets.size(0)
                    correct[domain] += (pred == targets).sum().item()
                # Update F_s with (possibly negated) D gradients on all domains.
                for domain in opt.unlabeled_domains:
                    d_inputs, _ = utils.endless_get_next_batch(
                        unlabeled_loaders, unlabeled_iters, domain)
                    d_inputs = d_inputs.to(opt.device)
                    shared_feat = self.F_s(d_inputs)
                    d_outputs = self.D(shared_feat)
                    if opt.loss.lower() == 'gr':
                        d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
                        l_d = functional.nll_loss(d_outputs, d_targets)
                        if opt.lambd > 0:
                            l_d *= -opt.lambd
                    elif opt.loss.lower() == 'l2':
                        d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
                        l_d = functional.mse_loss(d_outputs, d_targets)
                        if opt.lambd > 0:
                            l_d *= opt.lambd
                    l_d.backward()
                optimizer.step()
            # NOTE(review): 'test' is hard-coded here; this assumes the
            # unlabeled domain is literally named 'test' -- confirm against
            # opt.unlabeled_domains.
            loss_d_res.append(loss_d['test'])
            l_d_res.append(l_d.item())
            l_c_res.append(l_c.item())
            if (epoch + 1) % kwargs["display_step"] == 0:
                print(
                    "Epoch:", "%04d, done" % (epoch + 1)
                )
        return loss_d_res, l_d_res, l_c_res

    def transform(self, d, *args, **kwargs):
        """Evaluate the trained model on the unlabeled (test) domain.

        Args:
            d: dict of datasets keyed by domain name (see ``prepare_data``).

        Returns:
            Tuple ``(acc, correct)`` -- accuracy and raw number of correct
            predictions on the unlabeled domain.
        """
        F_d = self.F_d[opt.domains[0]]
        self.F_s.eval()
        F_d.eval()
        self.C.eval()
        _, _, _, it = self.prepare_data(d)
        it = it[opt.unlabeled_domains[0]]
        correct = 0
        total = 0
        confusion = ConfusionMeter(opt.num_labels)
        preds = []
        for inputs, targets in it:
            inputs = inputs.to(opt.device)
            targets = targets.to(opt.device)
            d_features = F_d(inputs)
            features = torch.cat((self.F_s(inputs), d_features), dim=1)
            outputs = self.C(features)
            _, pred = torch.max(outputs, 1)
            #preds.extend(pred.data)
            confusion.add(pred.data, targets.data)
            total += targets.size(0)
            correct += (pred == targets).sum().item()
        acc = correct / total
        #('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
        return acc, correct
        #return preds

    def get_name(self):
        """Return (and cache) a human-readable model identifier."""
        if self._name is None:
            self._name = "MAN({},{},{})".format(self.k, self.m, 1)
        return self._name
|
normal
|
{
"blob_id": "8f01934472805b5ad6dca328483a7ac79ae7748a",
"index": 6474,
"step-1": "<mask token>\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n <mask token>\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n 
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = 
self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n <mask token>\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-2": "<mask token>\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k, m, model_params, log_params)\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), 
self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, 
train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n <mask token>\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-3": "<mask token>\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k, m, model_params, log_params)\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), 
self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, 
train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n\n def transform(self, d, *args, **kwargs):\n F_d = self.F_d[opt.domains[0]]\n self.F_s.eval()\n F_d.eval()\n self.C.eval()\n _, _, _, it = self.prepare_data(d)\n it = it[opt.unlabeled_domains[0]]\n correct = 0\n total = 0\n confusion = ConfusionMeter(opt.num_labels)\n preds = []\n for inputs, targets in it:\n inputs = inputs.to(opt.device)\n targets = targets.to(opt.device)\n d_features = F_d(inputs)\n features = torch.cat((self.F_s(inputs), d_features), dim=1)\n outputs = self.C(features)\n _, pred = torch.max(outputs, 1)\n confusion.add(pred.data, targets.data)\n total += 
targets.size(0)\n correct += (pred == targets).sum().item()\n acc = correct / total\n return acc, correct\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-4": "import numpy as np\nfrom ..utils import Dataset\nimport math\nimport random\nfrom .interface import TopicModel\nfrom .man_model.models import *\nfrom .man_model import utils\nfrom .man_model.options import opt\nimport torch.utils.data as data_utils\nfrom tqdm import tqdm\nfrom collections import defaultdict\nimport itertools\nfrom torchnet.meter import ConfusionMeter\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\nfrom torch.utils.data import ConcatDataset, DataLoader\n<mask token>\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))\n return e_x / np.sum(e_x, axis=1).reshape(-1, 1)\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k, m, model_params, log_params)\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, 
unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += 
len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n 
l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n\n def transform(self, d, *args, **kwargs):\n F_d = self.F_d[opt.domains[0]]\n self.F_s.eval()\n F_d.eval()\n self.C.eval()\n _, _, _, it = self.prepare_data(d)\n it = it[opt.unlabeled_domains[0]]\n correct = 0\n total = 0\n confusion = ConfusionMeter(opt.num_labels)\n preds = []\n for inputs, targets in it:\n inputs = inputs.to(opt.device)\n targets = targets.to(opt.device)\n d_features = F_d(inputs)\n features = torch.cat((self.F_s(inputs), d_features), dim=1)\n outputs = self.C(features)\n _, pred = torch.max(outputs, 1)\n confusion.add(pred.data, targets.data)\n total += targets.size(0)\n correct += (pred == targets).sum().item()\n acc = correct / total\n return acc, correct\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-5": "#This version assumes domains = train/test set\nimport numpy as np\nfrom ..utils import Dataset\nimport math\nimport random\nfrom .interface import TopicModel\nfrom .man_model.models import *\nfrom .man_model import utils\nfrom .man_model.options import opt\nimport torch.utils.data as data_utils\nfrom tqdm import tqdm\nfrom collections import defaultdict\nimport itertools\nfrom torchnet.meter import ConfusionMeter\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\nfrom torch.utils.data import ConcatDataset, DataLoader\n\n\"\"\"\nIMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32\n=> need to convert. Dont know if same thing for target tho?\nAlso apparently, domain labels retrieved from get_domain_labels cannot be -1?\nOutput size for C HAS TO BE 2 even if it's a binary classification\n\"\"\"\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))\n return e_x / np.sum(e_x, axis=1).reshape(-1, 1)\n\nclass MultinomialAdversarialNetwork(TopicModel):\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k,m,model_params,log_params)\n \n def prepare_data(self,d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = 
torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n \n \n def fit(self, d, *args, **kwargs):\n #minibatches = create_minibatch(X, y, z, batch_size)\n #TODO: make this able to fit consecutively\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)\n #Training\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)\n# print(\"try\")\n# print(opt.device)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n# print(\"endtry\")\n# # optimizers\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n\n # training accuracy\n correct, total = defaultdict(int), defaultdict(int)\n # D accuracy\n d_correct, d_total = 0, 0\n # conceptually view 1 
epoch as 1 epoch of the first domain\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n # D iterations\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n # optional WGAN n_critic trick\n n_critic = opt.n_critic\n\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n # train on both labeled and unlabeled domains\n for domain in opt.unlabeled_domains:\n # targets not used\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n # D accuracy\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred==tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred==d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n # F&C iteration\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n #if opt.fix_emb:\n # utils.freeze_net(self.F_s.word_emb)\n # map(utils.freeze_net, self.F_d.values())\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, train_iters, domain)\n #target = torch.int64 rn\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n 
c_outputs = self.C(features)\n #return c_outputs, targets\n #DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n # training accuracy\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n # update F with D gradients on all domains\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n \n\n optimizer.step()\n \n\n# print(loss_d)\n# print('l_d loss: {}'.format(l_d.item()))\n# print('l_c loss: {}'.format(l_c.item()))\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs[\"display_step\"] == 0:\n print(\n \"Epoch:\", \"%04d, done\" % (epoch + 1) #\"cost=\", \"{:.9f}\"#.format(l_d.data[0])\n )\n return loss_d_res, l_d_res, l_c_res\n \n def transform(self, d, *args, **kwargs):\n F_d = self.F_d[opt.domains[0]]\n self.F_s.eval()\n F_d.eval()\n self.C.eval()\n _,_,_,it = self.prepare_data(d)\n it = it[opt.unlabeled_domains[0]]\n correct = 0\n total = 0\n confusion = ConfusionMeter(opt.num_labels)\n preds = []\n for inputs,targets in it:\n inputs = inputs.to(opt.device)\n targets = targets.to(opt.device)\n d_features = F_d(inputs)\n features = torch.cat((self.F_s(inputs), d_features), dim=1)\n outputs = self.C(features)\n _, pred = torch.max(outputs, 1)\n #preds.extend(pred.data)\n 
confusion.add(pred.data, targets.data)\n total += targets.size(0)\n correct += (pred == targets).sum().item()\n acc = correct / total\n #('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))\n return acc, correct\n #return preds\n \n def get_name(self):\n if self._name is None:\n self._name = \"MAN({},{},{})\".format(self.k,self.m,1)\n return self._name",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from unittest.mock import AsyncMock, Mock, patch
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, JoinRules
from synapse.api.errors import Codes, ResourceLimitError
from synapse.api.filtering import Filtering
from synapse.api.room_versions import RoomVersions
from synapse.handlers.sync import SyncConfig, SyncResult
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.util import Clock
import tests.unittest
import tests.utils
class SyncTestCase(tests.unittest.HomeserverTestCase):
    """Tests Sync Handler."""
    # Servlets registered so the REST helpers below can create users, log in,
    # create rooms, and knock on rooms.
    servlets = [
        admin.register_servlets,
        knock.register_servlets,
        login.register_servlets,
        room.register_servlets,
    ]
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        """Cache the sync handler, the main datastore, and auth blocking for each test."""
        self.sync_handler = self.hs.get_sync_handler()
        self.store = self.hs.get_datastores().main
        # AuthBlocking reads from the hs' config on initialization. We need to
        # modify its config instead of the hs'
        self.auth_blocking = self.hs.get_auth_blocking()
    def test_wait_for_sync_for_user_auth_blocking(self) -> None:
        """MAU limiting and the hs-disabled flag should both surface as
        ResourceLimitError (RESOURCE_LIMIT_EXCEEDED) from sync."""
        user_id1 = "@user1:test"
        user_id2 = "@user2:test"
        sync_config = generate_sync_config(user_id1)
        requester = create_requester(user_id1)
        self.reactor.advance(100)  # So we get not 0 time
        # Cap monthly active users at exactly one.
        self.auth_blocking._limit_usage_by_mau = True
        self.auth_blocking._max_mau_value = 1
        # Check that the happy case does not throw errors
        self.get_success(self.store.upsert_monthly_active_user(user_id1))
        self.get_success(
            self.sync_handler.wait_for_sync_for_user(requester, sync_config)
        )
        # Test that global lock works
        self.auth_blocking._hs_disabled = True
        e = self.get_failure(
            self.sync_handler.wait_for_sync_for_user(requester, sync_config),
            ResourceLimitError,
        )
        self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
        self.auth_blocking._hs_disabled = False
        # With hs_disabled cleared, a second user should still be rejected:
        # max_mau_value is 1 and user1 already occupies the slot.
        sync_config = generate_sync_config(user_id2)
        requester = create_requester(user_id2)
        e = self.get_failure(
            self.sync_handler.wait_for_sync_for_user(requester, sync_config),
            ResourceLimitError,
        )
        self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
    def test_unknown_room_version(self) -> None:
        """
        A room with an unknown room version should not break sync (and should be excluded).

        Exercises all three membership buckets (joined / invited / knocked),
        then rewrites each room's version to an unrecognised string directly in
        the database and checks the rooms vanish from both initial and
        incremental sync responses.
        """
        inviter = self.register_user("creator", "pass", admin=True)
        inviter_tok = self.login("@creator:test", "pass")
        user = self.register_user("user", "pass")
        tok = self.login("user", "pass")
        # Do an initial sync on a different device.
        requester = create_requester(user)
        initial_result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester, sync_config=generate_sync_config(user, device_id="dev")
            )
        )
        # Create a room as the user.
        joined_room = self.helper.create_room_as(user, tok=tok)
        # Invite the user to the room as someone else.
        invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)
        self.helper.invite(invite_room, targ=user, tok=inviter_tok)
        # NOTE(review): V7 presumably chosen because knocking requires room
        # version 7+ — confirm against the room-version capabilities.
        knock_room = self.helper.create_room_as(
            inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok
        )
        self.helper.send_state(
            knock_room,
            EventTypes.JoinRules,
            {"join_rule": JoinRules.KNOCK},
            tok=inviter_tok,
        )
        channel = self.make_request(
            "POST",
            "/_matrix/client/r0/knock/%s" % (knock_room,),
            b"{}",
            tok,
        )
        self.assertEqual(200, channel.code, channel.result)
        # The rooms should appear in the sync response.
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester, sync_config=generate_sync_config(user)
            )
        )
        self.assertIn(joined_room, [r.room_id for r in result.joined])
        self.assertIn(invite_room, [r.room_id for r in result.invited])
        self.assertIn(knock_room, [r.room_id for r in result.knocked])
        # Test a incremental sync (by providing a since_token).
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester,
                sync_config=generate_sync_config(user, device_id="dev"),
                since_token=initial_result.next_batch,
            )
        )
        self.assertIn(joined_room, [r.room_id for r in result.joined])
        self.assertIn(invite_room, [r.room_id for r in result.invited])
        self.assertIn(knock_room, [r.room_id for r in result.knocked])
        # Poke the database and update the room version to an unknown one.
        for room_id in (joined_room, invite_room, knock_room):
            self.get_success(
                self.hs.get_datastores().main.db_pool.simple_update(
                    "rooms",
                    keyvalues={"room_id": room_id},
                    updatevalues={"room_version": "unknown-room-version"},
                    desc="updated-room-version",
                )
            )
        # Blow away caches (supported room versions can only change due to a restart).
        self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
        self.store.get_rooms_for_user.invalidate_all()
        self.store._get_event_cache.clear()
        self.store._event_ref.clear()
        # The rooms should be excluded from the sync response.
        # Get a new request key.
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester, sync_config=generate_sync_config(user)
            )
        )
        self.assertNotIn(joined_room, [r.room_id for r in result.joined])
        self.assertNotIn(invite_room, [r.room_id for r in result.invited])
        self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
        # The rooms should also not be in an incremental sync.
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester,
                sync_config=generate_sync_config(user, device_id="dev"),
                since_token=initial_result.next_batch,
            )
        )
        self.assertNotIn(joined_room, [r.room_id for r in result.joined])
        self.assertNotIn(invite_room, [r.room_id for r in result.invited])
        self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
    def test_ban_wins_race_with_join(self) -> None:
        """Rooms shouldn't appear under "joined" if a join loses a race to a ban.

        A complicated edge case. Imagine the following scenario:

        * you attempt to join a room
        * racing with that is a ban which comes in over federation, which ends up with
          an earlier stream_ordering than the join.
        * you get a sync response with a sync token which is _after_ the ban, but before
          the join
        * now your join lands; it is a valid event because its `prev_event`s predate the
          ban, but will not make it into current_state_events (because bans win over
          joins in state res, essentially).
        * When we do a sync from the incremental sync, the only event in the timeline
          is your join ... and yet you aren't joined.

        The ban coming in over federation isn't crucial for this behaviour; the key
        requirements are:
        1. the homeserver generates a join event with prev_events that precede the ban
           (so that it passes the "are you banned" test)
        2. the join event has a stream_ordering after that of the ban.

        We use monkeypatching to artificially trigger condition (1).
        """
        # A local user Alice creates a room.
        owner = self.register_user("alice", "password")
        owner_tok = self.login(owner, "password")
        room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok)
        # Do a sync as Alice to get the latest event in the room.
        alice_sync_result: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                create_requester(owner), generate_sync_config(owner)
            )
        )
        self.assertEqual(len(alice_sync_result.joined), 1)
        self.assertEqual(alice_sync_result.joined[0].room_id, room_id)
        # Remember the last event before the ban: Eve's forged join will point
        # its prev_events here so the ban does not precede it.
        last_room_creation_event_id = (
            alice_sync_result.joined[0].timeline.events[-1].event_id
        )
        # Eve, a ne'er-do-well, registers.
        eve = self.register_user("eve", "password")
        eve_token = self.login(eve, "password")
        # Alice preemptively bans Eve.
        self.helper.ban(room_id, owner, eve, tok=owner_tok)
        # Eve syncs.
        eve_requester = create_requester(eve)
        eve_sync_config = generate_sync_config(eve)
        eve_sync_after_ban: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)
        )
        # Sanity check this sync result. We shouldn't be joined to the room.
        self.assertEqual(eve_sync_after_ban.joined, [])
        # Eve tries to join the room. We monkey patch the internal logic which selects
        # the prev_events used when creating the join event, such that the ban does not
        # precede the join.
        mocked_get_prev_events = patch.object(
            self.hs.get_datastores().main,
            "get_prev_events_for_room",
            new_callable=AsyncMock,
            return_value=[last_room_creation_event_id],
        )
        with mocked_get_prev_events:
            self.helper.join(room_id, eve, tok=eve_token)
        # Eve makes a second, incremental sync.
        eve_incremental_sync_after_join: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                eve_requester,
                eve_sync_config,
                since_token=eve_sync_after_ban.next_batch,
            )
        )
        # Eve should not see herself as joined to the room.
        self.assertEqual(eve_incremental_sync_after_join.joined, [])
        # If we did a third initial sync, we should _still_ see eve is not joined to the room.
        eve_initial_sync_after_join: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                eve_requester,
                eve_sync_config,
                since_token=None,
            )
        )
        self.assertEqual(eve_initial_sync_after_join.joined, [])
# Monotonically increasing counter consumed by generate_sync_config() so that
# every generated SyncConfig carries a unique request key (preventing the sync
# handler from coalescing distinct test requests).
_request_key = 0
def generate_sync_config(
    user_id: str, device_id: Optional[str] = "device_id"
) -> SyncConfig:
    """Generate a sync config (with a unique request key)."""
    global _request_key
    _request_key += 1
    # Build the pieces up-front so the constructor call stays readable.
    user = UserID.from_string(user_id)
    filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION
    return SyncConfig(
        user=user,
        filter_collection=filter_collection,
        is_guest=False,
        request_key=("request_key", _request_key),
        device_id=device_id,
    )
|
normal
|
{
"blob_id": "fc5b9117ecf56401a888e2b6a5e244f9ab115e41",
"index": 3999,
"step-1": "<mask token>\n\n\nclass SyncTestCase(tests.unittest.HomeserverTestCase):\n <mask token>\n servlets = [admin.register_servlets, knock.register_servlets, login.\n register_servlets, room.register_servlets]\n\n def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer\n ) ->None:\n self.sync_handler = self.hs.get_sync_handler()\n self.store = self.hs.get_datastores().main\n self.auth_blocking = self.hs.get_auth_blocking()\n\n def test_wait_for_sync_for_user_auth_blocking(self) ->None:\n user_id1 = '@user1:test'\n user_id2 = '@user2:test'\n sync_config = generate_sync_config(user_id1)\n requester = create_requester(user_id1)\n self.reactor.advance(100)\n self.auth_blocking._limit_usage_by_mau = True\n self.auth_blocking._max_mau_value = 1\n self.get_success(self.store.upsert_monthly_active_user(user_id1))\n self.get_success(self.sync_handler.wait_for_sync_for_user(requester,\n sync_config))\n self.auth_blocking._hs_disabled = True\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n self.auth_blocking._hs_disabled = False\n sync_config = generate_sync_config(user_id2)\n requester = create_requester(user_id2)\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n def test_unknown_room_version(self) ->None:\n \"\"\"\n A room with an unknown room version should not break sync (and should be excluded).\n \"\"\"\n inviter = self.register_user('creator', 'pass', admin=True)\n inviter_tok = self.login('@creator:test', 'pass')\n user = self.register_user('user', 'pass')\n tok = self.login('user', 'pass')\n requester = create_requester(user)\n initial_result = self.get_success(self.sync_handler.\n wait_for_sync_for_user(requester, sync_config=\n generate_sync_config(user, device_id='dev')))\n 
joined_room = self.helper.create_room_as(user, tok=tok)\n invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)\n self.helper.invite(invite_room, targ=user, tok=inviter_tok)\n knock_room = self.helper.create_room_as(inviter, room_version=\n RoomVersions.V7.identifier, tok=inviter_tok)\n self.helper.send_state(knock_room, EventTypes.JoinRules, {\n 'join_rule': JoinRules.KNOCK}, tok=inviter_tok)\n channel = self.make_request('POST', '/_matrix/client/r0/knock/%s' %\n (knock_room,), b'{}', tok)\n self.assertEqual(200, channel.code, channel.result)\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n for room_id in (joined_room, invite_room, knock_room):\n self.get_success(self.hs.get_datastores().main.db_pool.\n simple_update('rooms', keyvalues={'room_id': room_id},\n updatevalues={'room_version': 'unknown-room-version'}, desc\n ='updated-room-version'))\n self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()\n self.store.get_rooms_for_user.invalidate_all()\n self.store._get_event_cache.clear()\n self.store._event_ref.clear()\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n 
self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n def test_ban_wins_race_with_join(self) ->None:\n \"\"\"Rooms shouldn't appear under \"joined\" if a join loses a race to a ban.\n\n A complicated edge case. Imagine the following scenario:\n\n * you attempt to join a room\n * racing with that is a ban which comes in over federation, which ends up with\n an earlier stream_ordering than the join.\n * you get a sync response with a sync token which is _after_ the ban, but before\n the join\n * now your join lands; it is a valid event because its `prev_event`s predate the\n ban, but will not make it into current_state_events (because bans win over\n joins in state res, essentially).\n * When we do a sync from the incremental sync, the only event in the timeline\n is your join ... and yet you aren't joined.\n\n The ban coming in over federation isn't crucial for this behaviour; the key\n requirements are:\n 1. the homeserver generates a join event with prev_events that precede the ban\n (so that it passes the \"are you banned\" test)\n 2. 
the join event has a stream_ordering after that of the ban.\n\n We use monkeypatching to artificially trigger condition (1).\n \"\"\"\n owner = self.register_user('alice', 'password')\n owner_tok = self.login(owner, 'password')\n room_id = self.helper.create_room_as(owner, is_public=True, tok=\n owner_tok)\n alice_sync_result: SyncResult = self.get_success(self.sync_handler.\n wait_for_sync_for_user(create_requester(owner),\n generate_sync_config(owner)))\n self.assertEqual(len(alice_sync_result.joined), 1)\n self.assertEqual(alice_sync_result.joined[0].room_id, room_id)\n last_room_creation_event_id = alice_sync_result.joined[0\n ].timeline.events[-1].event_id\n eve = self.register_user('eve', 'password')\n eve_token = self.login(eve, 'password')\n self.helper.ban(room_id, owner, eve, tok=owner_tok)\n eve_requester = create_requester(eve)\n eve_sync_config = generate_sync_config(eve)\n eve_sync_after_ban: SyncResult = self.get_success(self.sync_handler\n .wait_for_sync_for_user(eve_requester, eve_sync_config))\n self.assertEqual(eve_sync_after_ban.joined, [])\n mocked_get_prev_events = patch.object(self.hs.get_datastores().main,\n 'get_prev_events_for_room', new_callable=AsyncMock,\n return_value=[last_room_creation_event_id])\n with mocked_get_prev_events:\n self.helper.join(room_id, eve, tok=eve_token)\n eve_incremental_sync_after_join: SyncResult = self.get_success(self\n .sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=eve_sync_after_ban.next_batch))\n self.assertEqual(eve_incremental_sync_after_join.joined, [])\n eve_initial_sync_after_join: SyncResult = self.get_success(self.\n sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=None))\n self.assertEqual(eve_initial_sync_after_join.joined, [])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SyncTestCase(tests.unittest.HomeserverTestCase):\n \"\"\"Tests Sync Handler.\"\"\"\n servlets = [admin.register_servlets, knock.register_servlets, login.\n register_servlets, room.register_servlets]\n\n def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer\n ) ->None:\n self.sync_handler = self.hs.get_sync_handler()\n self.store = self.hs.get_datastores().main\n self.auth_blocking = self.hs.get_auth_blocking()\n\n def test_wait_for_sync_for_user_auth_blocking(self) ->None:\n user_id1 = '@user1:test'\n user_id2 = '@user2:test'\n sync_config = generate_sync_config(user_id1)\n requester = create_requester(user_id1)\n self.reactor.advance(100)\n self.auth_blocking._limit_usage_by_mau = True\n self.auth_blocking._max_mau_value = 1\n self.get_success(self.store.upsert_monthly_active_user(user_id1))\n self.get_success(self.sync_handler.wait_for_sync_for_user(requester,\n sync_config))\n self.auth_blocking._hs_disabled = True\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n self.auth_blocking._hs_disabled = False\n sync_config = generate_sync_config(user_id2)\n requester = create_requester(user_id2)\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n def test_unknown_room_version(self) ->None:\n \"\"\"\n A room with an unknown room version should not break sync (and should be excluded).\n \"\"\"\n inviter = self.register_user('creator', 'pass', admin=True)\n inviter_tok = self.login('@creator:test', 'pass')\n user = self.register_user('user', 'pass')\n tok = self.login('user', 'pass')\n requester = create_requester(user)\n initial_result = self.get_success(self.sync_handler.\n wait_for_sync_for_user(requester, sync_config=\n generate_sync_config(user, 
device_id='dev')))\n joined_room = self.helper.create_room_as(user, tok=tok)\n invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)\n self.helper.invite(invite_room, targ=user, tok=inviter_tok)\n knock_room = self.helper.create_room_as(inviter, room_version=\n RoomVersions.V7.identifier, tok=inviter_tok)\n self.helper.send_state(knock_room, EventTypes.JoinRules, {\n 'join_rule': JoinRules.KNOCK}, tok=inviter_tok)\n channel = self.make_request('POST', '/_matrix/client/r0/knock/%s' %\n (knock_room,), b'{}', tok)\n self.assertEqual(200, channel.code, channel.result)\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n for room_id in (joined_room, invite_room, knock_room):\n self.get_success(self.hs.get_datastores().main.db_pool.\n simple_update('rooms', keyvalues={'room_id': room_id},\n updatevalues={'room_version': 'unknown-room-version'}, desc\n ='updated-room-version'))\n self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()\n self.store.get_rooms_for_user.invalidate_all()\n self.store._get_event_cache.clear()\n self.store._event_ref.clear()\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n 
self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n def test_ban_wins_race_with_join(self) ->None:\n \"\"\"Rooms shouldn't appear under \"joined\" if a join loses a race to a ban.\n\n A complicated edge case. Imagine the following scenario:\n\n * you attempt to join a room\n * racing with that is a ban which comes in over federation, which ends up with\n an earlier stream_ordering than the join.\n * you get a sync response with a sync token which is _after_ the ban, but before\n the join\n * now your join lands; it is a valid event because its `prev_event`s predate the\n ban, but will not make it into current_state_events (because bans win over\n joins in state res, essentially).\n * When we do a sync from the incremental sync, the only event in the timeline\n is your join ... and yet you aren't joined.\n\n The ban coming in over federation isn't crucial for this behaviour; the key\n requirements are:\n 1. the homeserver generates a join event with prev_events that precede the ban\n (so that it passes the \"are you banned\" test)\n 2. 
the join event has a stream_ordering after that of the ban.\n\n We use monkeypatching to artificially trigger condition (1).\n \"\"\"\n owner = self.register_user('alice', 'password')\n owner_tok = self.login(owner, 'password')\n room_id = self.helper.create_room_as(owner, is_public=True, tok=\n owner_tok)\n alice_sync_result: SyncResult = self.get_success(self.sync_handler.\n wait_for_sync_for_user(create_requester(owner),\n generate_sync_config(owner)))\n self.assertEqual(len(alice_sync_result.joined), 1)\n self.assertEqual(alice_sync_result.joined[0].room_id, room_id)\n last_room_creation_event_id = alice_sync_result.joined[0\n ].timeline.events[-1].event_id\n eve = self.register_user('eve', 'password')\n eve_token = self.login(eve, 'password')\n self.helper.ban(room_id, owner, eve, tok=owner_tok)\n eve_requester = create_requester(eve)\n eve_sync_config = generate_sync_config(eve)\n eve_sync_after_ban: SyncResult = self.get_success(self.sync_handler\n .wait_for_sync_for_user(eve_requester, eve_sync_config))\n self.assertEqual(eve_sync_after_ban.joined, [])\n mocked_get_prev_events = patch.object(self.hs.get_datastores().main,\n 'get_prev_events_for_room', new_callable=AsyncMock,\n return_value=[last_room_creation_event_id])\n with mocked_get_prev_events:\n self.helper.join(room_id, eve, tok=eve_token)\n eve_incremental_sync_after_join: SyncResult = self.get_success(self\n .sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=eve_sync_after_ban.next_batch))\n self.assertEqual(eve_incremental_sync_after_join.joined, [])\n eve_initial_sync_after_join: SyncResult = self.get_success(self.\n sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=None))\n self.assertEqual(eve_initial_sync_after_join.joined, [])\n\n\n<mask token>\n\n\ndef generate_sync_config(user_id: str, device_id: Optional[str]='device_id'\n ) ->SyncConfig:\n \"\"\"Generate a sync config (with a unique request key).\"\"\"\n global 
_request_key\n _request_key += 1\n return SyncConfig(user=UserID.from_string(user_id), filter_collection=\n Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False,\n request_key=('request_key', _request_key), device_id=device_id)\n",
"step-3": "<mask token>\n\n\nclass SyncTestCase(tests.unittest.HomeserverTestCase):\n \"\"\"Tests Sync Handler.\"\"\"\n servlets = [admin.register_servlets, knock.register_servlets, login.\n register_servlets, room.register_servlets]\n\n def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer\n ) ->None:\n self.sync_handler = self.hs.get_sync_handler()\n self.store = self.hs.get_datastores().main\n self.auth_blocking = self.hs.get_auth_blocking()\n\n def test_wait_for_sync_for_user_auth_blocking(self) ->None:\n user_id1 = '@user1:test'\n user_id2 = '@user2:test'\n sync_config = generate_sync_config(user_id1)\n requester = create_requester(user_id1)\n self.reactor.advance(100)\n self.auth_blocking._limit_usage_by_mau = True\n self.auth_blocking._max_mau_value = 1\n self.get_success(self.store.upsert_monthly_active_user(user_id1))\n self.get_success(self.sync_handler.wait_for_sync_for_user(requester,\n sync_config))\n self.auth_blocking._hs_disabled = True\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n self.auth_blocking._hs_disabled = False\n sync_config = generate_sync_config(user_id2)\n requester = create_requester(user_id2)\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n def test_unknown_room_version(self) ->None:\n \"\"\"\n A room with an unknown room version should not break sync (and should be excluded).\n \"\"\"\n inviter = self.register_user('creator', 'pass', admin=True)\n inviter_tok = self.login('@creator:test', 'pass')\n user = self.register_user('user', 'pass')\n tok = self.login('user', 'pass')\n requester = create_requester(user)\n initial_result = self.get_success(self.sync_handler.\n wait_for_sync_for_user(requester, sync_config=\n generate_sync_config(user, 
device_id='dev')))\n joined_room = self.helper.create_room_as(user, tok=tok)\n invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)\n self.helper.invite(invite_room, targ=user, tok=inviter_tok)\n knock_room = self.helper.create_room_as(inviter, room_version=\n RoomVersions.V7.identifier, tok=inviter_tok)\n self.helper.send_state(knock_room, EventTypes.JoinRules, {\n 'join_rule': JoinRules.KNOCK}, tok=inviter_tok)\n channel = self.make_request('POST', '/_matrix/client/r0/knock/%s' %\n (knock_room,), b'{}', tok)\n self.assertEqual(200, channel.code, channel.result)\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n for room_id in (joined_room, invite_room, knock_room):\n self.get_success(self.hs.get_datastores().main.db_pool.\n simple_update('rooms', keyvalues={'room_id': room_id},\n updatevalues={'room_version': 'unknown-room-version'}, desc\n ='updated-room-version'))\n self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()\n self.store.get_rooms_for_user.invalidate_all()\n self.store._get_event_cache.clear()\n self.store._event_ref.clear()\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n 
self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n def test_ban_wins_race_with_join(self) ->None:\n \"\"\"Rooms shouldn't appear under \"joined\" if a join loses a race to a ban.\n\n A complicated edge case. Imagine the following scenario:\n\n * you attempt to join a room\n * racing with that is a ban which comes in over federation, which ends up with\n an earlier stream_ordering than the join.\n * you get a sync response with a sync token which is _after_ the ban, but before\n the join\n * now your join lands; it is a valid event because its `prev_event`s predate the\n ban, but will not make it into current_state_events (because bans win over\n joins in state res, essentially).\n * When we do a sync from the incremental sync, the only event in the timeline\n is your join ... and yet you aren't joined.\n\n The ban coming in over federation isn't crucial for this behaviour; the key\n requirements are:\n 1. the homeserver generates a join event with prev_events that precede the ban\n (so that it passes the \"are you banned\" test)\n 2. 
the join event has a stream_ordering after that of the ban.\n\n We use monkeypatching to artificially trigger condition (1).\n \"\"\"\n owner = self.register_user('alice', 'password')\n owner_tok = self.login(owner, 'password')\n room_id = self.helper.create_room_as(owner, is_public=True, tok=\n owner_tok)\n alice_sync_result: SyncResult = self.get_success(self.sync_handler.\n wait_for_sync_for_user(create_requester(owner),\n generate_sync_config(owner)))\n self.assertEqual(len(alice_sync_result.joined), 1)\n self.assertEqual(alice_sync_result.joined[0].room_id, room_id)\n last_room_creation_event_id = alice_sync_result.joined[0\n ].timeline.events[-1].event_id\n eve = self.register_user('eve', 'password')\n eve_token = self.login(eve, 'password')\n self.helper.ban(room_id, owner, eve, tok=owner_tok)\n eve_requester = create_requester(eve)\n eve_sync_config = generate_sync_config(eve)\n eve_sync_after_ban: SyncResult = self.get_success(self.sync_handler\n .wait_for_sync_for_user(eve_requester, eve_sync_config))\n self.assertEqual(eve_sync_after_ban.joined, [])\n mocked_get_prev_events = patch.object(self.hs.get_datastores().main,\n 'get_prev_events_for_room', new_callable=AsyncMock,\n return_value=[last_room_creation_event_id])\n with mocked_get_prev_events:\n self.helper.join(room_id, eve, tok=eve_token)\n eve_incremental_sync_after_join: SyncResult = self.get_success(self\n .sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=eve_sync_after_ban.next_batch))\n self.assertEqual(eve_incremental_sync_after_join.joined, [])\n eve_initial_sync_after_join: SyncResult = self.get_success(self.\n sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=None))\n self.assertEqual(eve_initial_sync_after_join.joined, [])\n\n\n_request_key = 0\n\n\ndef generate_sync_config(user_id: str, device_id: Optional[str]='device_id'\n ) ->SyncConfig:\n \"\"\"Generate a sync config (with a unique request key).\"\"\"\n global 
_request_key\n _request_key += 1\n return SyncConfig(user=UserID.from_string(user_id), filter_collection=\n Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False,\n request_key=('request_key', _request_key), device_id=device_id)\n",
"step-4": "from typing import Optional\nfrom unittest.mock import AsyncMock, Mock, patch\nfrom twisted.test.proto_helpers import MemoryReactor\nfrom synapse.api.constants import EventTypes, JoinRules\nfrom synapse.api.errors import Codes, ResourceLimitError\nfrom synapse.api.filtering import Filtering\nfrom synapse.api.room_versions import RoomVersions\nfrom synapse.handlers.sync import SyncConfig, SyncResult\nfrom synapse.rest import admin\nfrom synapse.rest.client import knock, login, room\nfrom synapse.server import HomeServer\nfrom synapse.types import UserID, create_requester\nfrom synapse.util import Clock\nimport tests.unittest\nimport tests.utils\n\n\nclass SyncTestCase(tests.unittest.HomeserverTestCase):\n \"\"\"Tests Sync Handler.\"\"\"\n servlets = [admin.register_servlets, knock.register_servlets, login.\n register_servlets, room.register_servlets]\n\n def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer\n ) ->None:\n self.sync_handler = self.hs.get_sync_handler()\n self.store = self.hs.get_datastores().main\n self.auth_blocking = self.hs.get_auth_blocking()\n\n def test_wait_for_sync_for_user_auth_blocking(self) ->None:\n user_id1 = '@user1:test'\n user_id2 = '@user2:test'\n sync_config = generate_sync_config(user_id1)\n requester = create_requester(user_id1)\n self.reactor.advance(100)\n self.auth_blocking._limit_usage_by_mau = True\n self.auth_blocking._max_mau_value = 1\n self.get_success(self.store.upsert_monthly_active_user(user_id1))\n self.get_success(self.sync_handler.wait_for_sync_for_user(requester,\n sync_config))\n self.auth_blocking._hs_disabled = True\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n self.auth_blocking._hs_disabled = False\n sync_config = generate_sync_config(user_id2)\n requester = create_requester(user_id2)\n e = self.get_failure(self.sync_handler.wait_for_sync_for_user(\n 
requester, sync_config), ResourceLimitError)\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n def test_unknown_room_version(self) ->None:\n \"\"\"\n A room with an unknown room version should not break sync (and should be excluded).\n \"\"\"\n inviter = self.register_user('creator', 'pass', admin=True)\n inviter_tok = self.login('@creator:test', 'pass')\n user = self.register_user('user', 'pass')\n tok = self.login('user', 'pass')\n requester = create_requester(user)\n initial_result = self.get_success(self.sync_handler.\n wait_for_sync_for_user(requester, sync_config=\n generate_sync_config(user, device_id='dev')))\n joined_room = self.helper.create_room_as(user, tok=tok)\n invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)\n self.helper.invite(invite_room, targ=user, tok=inviter_tok)\n knock_room = self.helper.create_room_as(inviter, room_version=\n RoomVersions.V7.identifier, tok=inviter_tok)\n self.helper.send_state(knock_room, EventTypes.JoinRules, {\n 'join_rule': JoinRules.KNOCK}, tok=inviter_tok)\n channel = self.make_request('POST', '/_matrix/client/r0/knock/%s' %\n (knock_room,), b'{}', tok)\n self.assertEqual(200, channel.code, channel.result)\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n for room_id in (joined_room, invite_room, knock_room):\n 
self.get_success(self.hs.get_datastores().main.db_pool.\n simple_update('rooms', keyvalues={'room_id': room_id},\n updatevalues={'room_version': 'unknown-room-version'}, desc\n ='updated-room-version'))\n self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()\n self.store.get_rooms_for_user.invalidate_all()\n self.store._get_event_cache.clear()\n self.store._event_ref.clear()\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n result = self.get_success(self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\n 'dev'), since_token=initial_result.next_batch))\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n def test_ban_wins_race_with_join(self) ->None:\n \"\"\"Rooms shouldn't appear under \"joined\" if a join loses a race to a ban.\n\n A complicated edge case. Imagine the following scenario:\n\n * you attempt to join a room\n * racing with that is a ban which comes in over federation, which ends up with\n an earlier stream_ordering than the join.\n * you get a sync response with a sync token which is _after_ the ban, but before\n the join\n * now your join lands; it is a valid event because its `prev_event`s predate the\n ban, but will not make it into current_state_events (because bans win over\n joins in state res, essentially).\n * When we do a sync from the incremental sync, the only event in the timeline\n is your join ... and yet you aren't joined.\n\n The ban coming in over federation isn't crucial for this behaviour; the key\n requirements are:\n 1. 
the homeserver generates a join event with prev_events that precede the ban\n (so that it passes the \"are you banned\" test)\n 2. the join event has a stream_ordering after that of the ban.\n\n We use monkeypatching to artificially trigger condition (1).\n \"\"\"\n owner = self.register_user('alice', 'password')\n owner_tok = self.login(owner, 'password')\n room_id = self.helper.create_room_as(owner, is_public=True, tok=\n owner_tok)\n alice_sync_result: SyncResult = self.get_success(self.sync_handler.\n wait_for_sync_for_user(create_requester(owner),\n generate_sync_config(owner)))\n self.assertEqual(len(alice_sync_result.joined), 1)\n self.assertEqual(alice_sync_result.joined[0].room_id, room_id)\n last_room_creation_event_id = alice_sync_result.joined[0\n ].timeline.events[-1].event_id\n eve = self.register_user('eve', 'password')\n eve_token = self.login(eve, 'password')\n self.helper.ban(room_id, owner, eve, tok=owner_tok)\n eve_requester = create_requester(eve)\n eve_sync_config = generate_sync_config(eve)\n eve_sync_after_ban: SyncResult = self.get_success(self.sync_handler\n .wait_for_sync_for_user(eve_requester, eve_sync_config))\n self.assertEqual(eve_sync_after_ban.joined, [])\n mocked_get_prev_events = patch.object(self.hs.get_datastores().main,\n 'get_prev_events_for_room', new_callable=AsyncMock,\n return_value=[last_room_creation_event_id])\n with mocked_get_prev_events:\n self.helper.join(room_id, eve, tok=eve_token)\n eve_incremental_sync_after_join: SyncResult = self.get_success(self\n .sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=eve_sync_after_ban.next_batch))\n self.assertEqual(eve_incremental_sync_after_join.joined, [])\n eve_initial_sync_after_join: SyncResult = self.get_success(self.\n sync_handler.wait_for_sync_for_user(eve_requester,\n eve_sync_config, since_token=None))\n self.assertEqual(eve_initial_sync_after_join.joined, [])\n\n\n_request_key = 0\n\n\ndef generate_sync_config(user_id: str, 
device_id: Optional[str]='device_id'\n ) ->SyncConfig:\n \"\"\"Generate a sync config (with a unique request key).\"\"\"\n global _request_key\n _request_key += 1\n return SyncConfig(user=UserID.from_string(user_id), filter_collection=\n Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False,\n request_key=('request_key', _request_key), device_id=device_id)\n",
"step-5": "# Copyright 2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\nfrom unittest.mock import AsyncMock, Mock, patch\n\nfrom twisted.test.proto_helpers import MemoryReactor\n\nfrom synapse.api.constants import EventTypes, JoinRules\nfrom synapse.api.errors import Codes, ResourceLimitError\nfrom synapse.api.filtering import Filtering\nfrom synapse.api.room_versions import RoomVersions\nfrom synapse.handlers.sync import SyncConfig, SyncResult\nfrom synapse.rest import admin\nfrom synapse.rest.client import knock, login, room\nfrom synapse.server import HomeServer\nfrom synapse.types import UserID, create_requester\nfrom synapse.util import Clock\n\nimport tests.unittest\nimport tests.utils\n\n\nclass SyncTestCase(tests.unittest.HomeserverTestCase):\n \"\"\"Tests Sync Handler.\"\"\"\n\n servlets = [\n admin.register_servlets,\n knock.register_servlets,\n login.register_servlets,\n room.register_servlets,\n ]\n\n def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:\n self.sync_handler = self.hs.get_sync_handler()\n self.store = self.hs.get_datastores().main\n\n # AuthBlocking reads from the hs' config on initialization. 
We need to\n # modify its config instead of the hs'\n self.auth_blocking = self.hs.get_auth_blocking()\n\n def test_wait_for_sync_for_user_auth_blocking(self) -> None:\n user_id1 = \"@user1:test\"\n user_id2 = \"@user2:test\"\n sync_config = generate_sync_config(user_id1)\n requester = create_requester(user_id1)\n\n self.reactor.advance(100) # So we get not 0 time\n self.auth_blocking._limit_usage_by_mau = True\n self.auth_blocking._max_mau_value = 1\n\n # Check that the happy case does not throw errors\n self.get_success(self.store.upsert_monthly_active_user(user_id1))\n self.get_success(\n self.sync_handler.wait_for_sync_for_user(requester, sync_config)\n )\n\n # Test that global lock works\n self.auth_blocking._hs_disabled = True\n e = self.get_failure(\n self.sync_handler.wait_for_sync_for_user(requester, sync_config),\n ResourceLimitError,\n )\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n self.auth_blocking._hs_disabled = False\n\n sync_config = generate_sync_config(user_id2)\n requester = create_requester(user_id2)\n\n e = self.get_failure(\n self.sync_handler.wait_for_sync_for_user(requester, sync_config),\n ResourceLimitError,\n )\n self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n def test_unknown_room_version(self) -> None:\n \"\"\"\n A room with an unknown room version should not break sync (and should be excluded).\n \"\"\"\n inviter = self.register_user(\"creator\", \"pass\", admin=True)\n inviter_tok = self.login(\"@creator:test\", \"pass\")\n\n user = self.register_user(\"user\", \"pass\")\n tok = self.login(\"user\", \"pass\")\n\n # Do an initial sync on a different device.\n requester = create_requester(user)\n initial_result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user, device_id=\"dev\")\n )\n )\n\n # Create a room as the user.\n joined_room = self.helper.create_room_as(user, tok=tok)\n\n # Invite the user to the room as someone 
else.\n invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)\n self.helper.invite(invite_room, targ=user, tok=inviter_tok)\n\n knock_room = self.helper.create_room_as(\n inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok\n )\n self.helper.send_state(\n knock_room,\n EventTypes.JoinRules,\n {\"join_rule\": JoinRules.KNOCK},\n tok=inviter_tok,\n )\n channel = self.make_request(\n \"POST\",\n \"/_matrix/client/r0/knock/%s\" % (knock_room,),\n b\"{}\",\n tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n\n # The rooms should appear in the sync response.\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)\n )\n )\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n\n # Test a incremental sync (by providing a since_token).\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester,\n sync_config=generate_sync_config(user, device_id=\"dev\"),\n since_token=initial_result.next_batch,\n )\n )\n self.assertIn(joined_room, [r.room_id for r in result.joined])\n self.assertIn(invite_room, [r.room_id for r in result.invited])\n self.assertIn(knock_room, [r.room_id for r in result.knocked])\n\n # Poke the database and update the room version to an unknown one.\n for room_id in (joined_room, invite_room, knock_room):\n self.get_success(\n self.hs.get_datastores().main.db_pool.simple_update(\n \"rooms\",\n keyvalues={\"room_id\": room_id},\n updatevalues={\"room_version\": \"unknown-room-version\"},\n desc=\"updated-room-version\",\n )\n )\n\n # Blow away caches (supported room versions can only change due to a restart).\n self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()\n self.store.get_rooms_for_user.invalidate_all()\n self.store._get_event_cache.clear()\n 
self.store._event_ref.clear()\n\n # The rooms should be excluded from the sync response.\n # Get a new request key.\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester, sync_config=generate_sync_config(user)\n )\n )\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n # The rooms should also not be in an incremental sync.\n result = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n requester,\n sync_config=generate_sync_config(user, device_id=\"dev\"),\n since_token=initial_result.next_batch,\n )\n )\n self.assertNotIn(joined_room, [r.room_id for r in result.joined])\n self.assertNotIn(invite_room, [r.room_id for r in result.invited])\n self.assertNotIn(knock_room, [r.room_id for r in result.knocked])\n\n def test_ban_wins_race_with_join(self) -> None:\n \"\"\"Rooms shouldn't appear under \"joined\" if a join loses a race to a ban.\n\n A complicated edge case. Imagine the following scenario:\n\n * you attempt to join a room\n * racing with that is a ban which comes in over federation, which ends up with\n an earlier stream_ordering than the join.\n * you get a sync response with a sync token which is _after_ the ban, but before\n the join\n * now your join lands; it is a valid event because its `prev_event`s predate the\n ban, but will not make it into current_state_events (because bans win over\n joins in state res, essentially).\n * When we do a sync from the incremental sync, the only event in the timeline\n is your join ... and yet you aren't joined.\n\n The ban coming in over federation isn't crucial for this behaviour; the key\n requirements are:\n 1. the homeserver generates a join event with prev_events that precede the ban\n (so that it passes the \"are you banned\" test)\n 2. 
the join event has a stream_ordering after that of the ban.\n\n We use monkeypatching to artificially trigger condition (1).\n \"\"\"\n # A local user Alice creates a room.\n owner = self.register_user(\"alice\", \"password\")\n owner_tok = self.login(owner, \"password\")\n room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok)\n\n # Do a sync as Alice to get the latest event in the room.\n alice_sync_result: SyncResult = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n create_requester(owner), generate_sync_config(owner)\n )\n )\n self.assertEqual(len(alice_sync_result.joined), 1)\n self.assertEqual(alice_sync_result.joined[0].room_id, room_id)\n last_room_creation_event_id = (\n alice_sync_result.joined[0].timeline.events[-1].event_id\n )\n\n # Eve, a ne'er-do-well, registers.\n eve = self.register_user(\"eve\", \"password\")\n eve_token = self.login(eve, \"password\")\n\n # Alice preemptively bans Eve.\n self.helper.ban(room_id, owner, eve, tok=owner_tok)\n\n # Eve syncs.\n eve_requester = create_requester(eve)\n eve_sync_config = generate_sync_config(eve)\n eve_sync_after_ban: SyncResult = self.get_success(\n self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)\n )\n\n # Sanity check this sync result. We shouldn't be joined to the room.\n self.assertEqual(eve_sync_after_ban.joined, [])\n\n # Eve tries to join the room. 
We monkey patch the internal logic which selects\n # the prev_events used when creating the join event, such that the ban does not\n # precede the join.\n mocked_get_prev_events = patch.object(\n self.hs.get_datastores().main,\n \"get_prev_events_for_room\",\n new_callable=AsyncMock,\n return_value=[last_room_creation_event_id],\n )\n with mocked_get_prev_events:\n self.helper.join(room_id, eve, tok=eve_token)\n\n # Eve makes a second, incremental sync.\n eve_incremental_sync_after_join: SyncResult = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n eve_requester,\n eve_sync_config,\n since_token=eve_sync_after_ban.next_batch,\n )\n )\n # Eve should not see herself as joined to the room.\n self.assertEqual(eve_incremental_sync_after_join.joined, [])\n\n # If we did a third initial sync, we should _still_ see eve is not joined to the room.\n eve_initial_sync_after_join: SyncResult = self.get_success(\n self.sync_handler.wait_for_sync_for_user(\n eve_requester,\n eve_sync_config,\n since_token=None,\n )\n )\n self.assertEqual(eve_initial_sync_after_join.joined, [])\n\n\n_request_key = 0\n\n\ndef generate_sync_config(\n user_id: str, device_id: Optional[str] = \"device_id\"\n) -> SyncConfig:\n \"\"\"Generate a sync config (with a unique request key).\"\"\"\n global _request_key\n _request_key += 1\n return SyncConfig(\n user=UserID.from_string(user_id),\n filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION,\n is_guest=False,\n request_key=(\"request_key\", _request_key),\n device_id=device_id,\n )\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
import sqlite3
from contextlib import closing

# `closing` guarantees the connection is released even if an INSERT fails
# part-way through; the bare connect/close pair leaked on exceptions.
with closing(sqlite3.connect('database.db')) as connection:
    cursor = connection.cursor()
    # IF NOT EXISTS makes the script runnable on a fresh database; the
    # original left table creation commented out and crashed with
    # "no such table: users" unless the table already existed.
    cursor.execute('CREATE TABLE IF NOT EXISTS users (id int, username text, password text)')
    # Parameterized placeholders (?) let sqlite3 handle quoting/escaping.
    cursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))
    users = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]
    cursor.executemany('INSERT INTO users VALUES(?,?,?)', users)
    # Echo the full table contents for a quick visual check.
    for row in cursor.execute('SELECT * FROM users'):
        print(row)
    # Persist the inserts before the connection is closed.
    connection.commit()
|
normal
|
{
"blob_id": "d6b49533573dfefba6286ac2bffc2bd7a4075063",
"index": 1731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\n<mask token>\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-3": "<mask token>\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\nusers = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-4": "import sqlite3\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\ncursor.execute('INSERT INTO users VALUES(?,?,?)', (1, 'ilia', 'qwerty'))\nusers = [(2, 'nika', 'asdf'), (3, 'nino', 'sdfg')]\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\nconnection.commit()\nconnection.close()\n",
"step-5": "import sqlite3\n\nconnection = sqlite3.connect('database.db')\n\ncursor = connection.cursor()\n\n# cursor.execute('CREATE TABLE users (id int, username text, password text)')\n\ncursor.execute('INSERT INTO users VALUES(?,?,?)',(1,'ilia','qwerty'))\n\nusers = [(2,'nika','asdf'),(3,'nino','sdfg')]\n\ncursor.executemany('INSERT INTO users VALUES(?,?,?)', users)\n\nfor row in cursor.execute('SELECT * FROM users'):\n print(row)\n\nconnection.commit()\n\nconnection.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Phase transition module
"""
import utils
import datetime
import itertools
import numpy as np
import recovery as rec
import sampling as smp
import graphs_signals as gs
import pathos.multiprocessing as mp
from tqdm import tqdm
## MAIN FUNCTIONS ##
def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,
                    aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',
                    save_to_disk=True, save_each=1000, chunksize=1.):
    r"""
    Evaluates a grid of parameter pairs across repeated trials and aggregates the result.

    Parameters
    ----------
    param_list_one : array_like
        List of values to test for the first parameter.
    param_list_two : array_like, optional
        List of values to test for the second parameter. Can be empty, in which case a
        one-dimensional grid is evaluated.
    param_eval : callable
        Must take an instance of parameter values and return an object that can be evaluated
        by `aggr_method`. It should accept one input if `param_list_two` is empty, and two inputs
        otherwise.
    n_trials : int, optional
        Number of trials to run for each parameter pair. (default is `16`)
    aggr_method : callable, optional
        The aggregation method for the values returned by `param_eval` on different
        trials for the same parameter pair. (default is :func:`numpy.mean`)
    save_dir : string, optional
        Directory onto which save the result. (default is 'data/')
    file_name : string, optional
        Optional name for the file. It is always prepended with the time stamp at the
        end of the grid evaluation. (default is 'grid evaluation')
    save_to_disk : bool, optional
        Whether to save the experiment to disk (True) or not (False). (default is `True`)
    save_each : int, optional
        Save the experiment each time `save_each` grid points are computed. (default is `1000`)
    chunksize : int
        The size of the chunks of jobs sent to each parallel worker. (default is `1`)

    Returns
    -------
    dict
        A dictionary with the results of the experiment.

    """

    if not list(param_list_two): # If `param_list_two` is empty
        # One-dimensional sweep: each grid point is a single parameter value.
        params = param_list_one
        grid_shape = (len(param_list_one),)
        is_really_grid = False

    else:
        # Two-dimensional sweep: Cartesian product of both parameter lists.
        params = list(itertools.product(param_list_one, param_list_two))
        grid_shape = (len(param_list_one), len(param_list_two))
        is_really_grid = True

    def grid_fun(point): # Function to compute for each grid point
        # Runs `n_trials` independent trials at this grid point and
        # aggregates them with `aggr_method` (NaN-initialised so an
        # unfinished trial is visible rather than silently zero).
        trial_out = np.nan * np.ones((n_trials,))

        for i in np.arange(n_trials):

            if is_really_grid:
                trial_out[i] = param_eval(point[0], point[1])
            else: # If `param_list_two` is empty
                trial_out[i] = param_eval(point)

        return aggr_method(trial_out)

    n_grid_pts = len(params)

    # Recording procedure
    def record_experiment(grid):
        # Snapshot the (possibly partial) flat grid into a timestamped
        # experiment dict, reshaped to the logical grid shape, and
        # optionally pickle it to disk via the project's `utils` helper.
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        save_path = save_dir + now + ' ' + file_name + '.pkl'
        experiment = {
            'date': now,
            'rows': param_list_one,
            'cols': param_list_two,
            'n_trials': n_trials,
            'grid': np.reshape(grid, grid_shape),
            'path': save_path
        }
        if save_to_disk:
            utils.save_obj(experiment, save_path)
        return experiment

    # Set a pool of workers
    # NOTE(review): capped at 24 workers — presumably to bound load on shared
    # machines; confirm this cap suits the deployment environment.
    nb_workers = min(mp.cpu_count(), 24)
    print('Working with {} processes.'.format(nb_workers))
    pool = mp.Pool(nb_workers)

    # Iterate `grid_fun` across workers
    # `imap` yields results in submission order, so `idx` maps directly
    # onto the `params` ordering used to build `grid_shape`.
    it = pool.imap(grid_fun, params, chunksize=chunksize)
    grid = np.nan * np.ones((n_grid_pts,))

    for idx, val in enumerate(tqdm(it, total=n_grid_pts)):
        grid[idx] = val

        # Make sure that we save after each couple of iterations
        if (idx >= save_each) and (idx % save_each == 0):
            experiment = record_experiment(grid)

    # Close pool
    pool.close()
    pool.join()

    # Final record includes every grid point (intermediate saves may not).
    experiment = record_experiment(grid)

    return experiment
def line_evaluation(param_list, param_eval, file_name='line evaluation', **kwargs):
    r"""
    Evaluates a one-dimensional list of parameters across repeated trials.

    Thin wrapper around :func:`grid_evaluation` that sweeps a single
    parameter and renames the result keys to reflect the 1-D layout.

    Parameters
    ----------
    param_list : array_like
        List of values to test for parameter of interest.
    param_eval : callable
        Must take a parameter instance and return an object that can be evaluated
        by `aggr_meth` (see :func:`grid_evaluation`).
    file_name : string, optional
        Optional name for the file. (default is 'line evaluation')

    Returns
    -------
    dict
        A dictionary with the results of the experiment.

    Notes
    -----
    You can also explicitely set the arguments in :func:`grid_evaluation` in this function
    call.

    """
    # Delegate to the grid evaluator with an empty second axis.
    result = grid_evaluation(
        param_list_one=param_list,
        param_list_two=[],
        param_eval=param_eval,
        file_name=file_name,
        **kwargs
    )

    # Rename grid-specific keys for the one-dimensional sweep.
    result['line'] = result.pop('grid')
    result['cols'] = result.pop('rows')

    return result
|
normal
|
{
"blob_id": "d65f858c3ad06226b83d2627f6d38e03eae5b36c",
"index": 266,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **\n kwargs):\n \"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n experiment = grid_evaluation(param_list_one=param_list, param_list_two=\n [], param_eval=param_eval, file_name=file_name, **kwargs)\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n return experiment\n",
"step-3": "<mask token>\n\n\ndef grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,\n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.0):\n \"\"\"\n Evaluates a grid of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list_one : array_like\n List of values to test for the first parameter.\n param_list_two : array_like, optional\n List of values to test for the second parameter. Can be empty, in which case a \n one-dimensional grid is evaluated.\n param_eval : callable\n Must take an instance of parameter values and return an object that can be evaluated \n by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs \n otherwise.\n n_trials : int, optional\n Number of trials to run for each parameter pair. (default is `16`)\n aggr_method : callable, optional\n The aggregation method for the values returned by `patam_eval` on different \n trials for the same parameter pair. (default is :func:`numpy.mean`)\n save_dir : string, optional\n Directory onto which save the result. (default is 'data/')\n file_name : string, optional\n Optional name for the file. It is always prepended with the time stamp at the \n end of the grid evaluation. (default is 'grid evaluation')\n save_to_disk : bool, optional\n Whether to save the experiment to disk (True) or not (False). (default is `True`)\n save_each : int, optional\n Save the experiment each time `save_each` grid points are computed. (default is `1000`)\n chunksize : int\n The size of the chunks of jobs sent to each parallel worker. 
(default is `1`)\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n\n \"\"\"\n if not list(param_list_two):\n params = param_list_one\n grid_shape = len(param_list_one),\n is_really_grid = False\n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = len(param_list_one), len(param_list_two)\n is_really_grid = True\n\n def grid_fun(point):\n trial_out = np.nan * np.ones((n_trials,))\n for i in np.arange(n_trials):\n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else:\n trial_out[i] = param_eval(point)\n return aggr_method(trial_out)\n n_grid_pts = len(params)\n\n def record_experiment(grid):\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {'date': now, 'rows': param_list_one, 'cols':\n param_list_two, 'n_trials': n_trials, 'grid': np.reshape(grid,\n grid_shape), 'path': save_path}\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n if idx >= save_each and idx % save_each == 0:\n experiment = record_experiment(grid)\n pool.close()\n pool.join()\n experiment = record_experiment(grid)\n return experiment\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **\n kwargs):\n \"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n 
Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n experiment = grid_evaluation(param_list_one=param_list, param_list_two=\n [], param_eval=param_eval, file_name=file_name, **kwargs)\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n return experiment\n",
"step-4": "<mask token>\nimport utils\nimport datetime\nimport itertools\nimport numpy as np\nimport recovery as rec\nimport sampling as smp\nimport graphs_signals as gs\nimport pathos.multiprocessing as mp\nfrom tqdm import tqdm\n\n\ndef grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,\n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.0):\n \"\"\"\n Evaluates a grid of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list_one : array_like\n List of values to test for the first parameter.\n param_list_two : array_like, optional\n List of values to test for the second parameter. Can be empty, in which case a \n one-dimensional grid is evaluated.\n param_eval : callable\n Must take an instance of parameter values and return an object that can be evaluated \n by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs \n otherwise.\n n_trials : int, optional\n Number of trials to run for each parameter pair. (default is `16`)\n aggr_method : callable, optional\n The aggregation method for the values returned by `patam_eval` on different \n trials for the same parameter pair. (default is :func:`numpy.mean`)\n save_dir : string, optional\n Directory onto which save the result. (default is 'data/')\n file_name : string, optional\n Optional name for the file. It is always prepended with the time stamp at the \n end of the grid evaluation. (default is 'grid evaluation')\n save_to_disk : bool, optional\n Whether to save the experiment to disk (True) or not (False). (default is `True`)\n save_each : int, optional\n Save the experiment each time `save_each` grid points are computed. (default is `1000`)\n chunksize : int\n The size of the chunks of jobs sent to each parallel worker. 
(default is `1`)\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n\n \"\"\"\n if not list(param_list_two):\n params = param_list_one\n grid_shape = len(param_list_one),\n is_really_grid = False\n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = len(param_list_one), len(param_list_two)\n is_really_grid = True\n\n def grid_fun(point):\n trial_out = np.nan * np.ones((n_trials,))\n for i in np.arange(n_trials):\n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else:\n trial_out[i] = param_eval(point)\n return aggr_method(trial_out)\n n_grid_pts = len(params)\n\n def record_experiment(grid):\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {'date': now, 'rows': param_list_one, 'cols':\n param_list_two, 'n_trials': n_trials, 'grid': np.reshape(grid,\n grid_shape), 'path': save_path}\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n if idx >= save_each and idx % save_each == 0:\n experiment = record_experiment(grid)\n pool.close()\n pool.join()\n experiment = record_experiment(grid)\n return experiment\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **\n kwargs):\n \"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n 
Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n experiment = grid_evaluation(param_list_one=param_list, param_list_two=\n [], param_eval=param_eval, file_name=file_name, **kwargs)\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n return experiment\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Phase transition module\n\n\"\"\"\n\n\nimport utils\nimport datetime\nimport itertools\n\nimport numpy as np\nimport recovery as rec\nimport sampling as smp\nimport graphs_signals as gs\nimport pathos.multiprocessing as mp\n\nfrom tqdm import tqdm\n\n\n## MAIN FUNCTIONS ##\n\ndef grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n r\"\"\"\n Evaluates a grid of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list_one : array_like\n List of values to test for the first parameter.\n param_list_two : array_like, optional\n List of values to test for the second parameter. Can be empty, in which case a \n one-dimensional grid is evaluated.\n param_eval : callable\n Must take an instance of parameter values and return an object that can be evaluated \n by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs \n otherwise.\n n_trials : int, optional\n Number of trials to run for each parameter pair. (default is `16`)\n aggr_method : callable, optional\n The aggregation method for the values returned by `patam_eval` on different \n trials for the same parameter pair. (default is :func:`numpy.mean`)\n save_dir : string, optional\n Directory onto which save the result. (default is 'data/')\n file_name : string, optional\n Optional name for the file. It is always prepended with the time stamp at the \n end of the grid evaluation. (default is 'grid evaluation')\n save_to_disk : bool, optional\n Whether to save the experiment to disk (True) or not (False). (default is `True`)\n save_each : int, optional\n Save the experiment each time `save_each` grid points are computed. (default is `1000`)\n chunksize : int\n The size of the chunks of jobs sent to each parallel worker. 
(default is `1`)\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n\n \"\"\"\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **kwargs):\n r\"\"\"\n Evaluates a list of parameter pairs across 
repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n \n experiment = grid_evaluation(param_list_one=param_list,\n param_list_two=[],\n param_eval=param_eval,\n file_name=file_name,\n **kwargs)\n\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n \n return experiment\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Person:
country = "INDIA"
def __init__(self):
print("its base constructor")
def takeBreath(self):
print("Yes Iam breathing.")
class Emp(Person): # inherits person
def takeBreath(self):
print("Yes Iam EMP and Iam also breathing.")
class Prog(Emp):
def __init__(self):
super().__init__() # CALLS BASE CLASS CONTRUCTOR
print("its child constructor")
def takeBreath(self):
super().takeBreath() # calls previous class's method
print("Iam a programmer and breathing++.")
a=0
p = Person()
p.takeBreath()
e = Emp()
e.takeBreath()
pr = Prog()
pr.takeBreath()
|
normal
|
{
"blob_id": "cb2e2ef70935a22854c70fedf4f4a6715b089291",
"index": 1990,
"step-1": "<mask token>\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Emp(Person):\n\n def takeBreath(self):\n print('Yes Iam EMP and Iam also breathing.')\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\n",
"step-3": "class Person:\n <mask token>\n <mask token>\n\n def takeBreath(self):\n print('Yes Iam breathing.')\n\n\nclass Emp(Person):\n\n def takeBreath(self):\n print('Yes Iam EMP and Iam also breathing.')\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\n",
"step-4": "class Person:\n country = 'INDIA'\n\n def __init__(self):\n print('its base constructor')\n\n def takeBreath(self):\n print('Yes Iam breathing.')\n\n\nclass Emp(Person):\n\n def takeBreath(self):\n print('Yes Iam EMP and Iam also breathing.')\n\n\nclass Prog(Emp):\n\n def __init__(self):\n super().__init__()\n print('its child constructor')\n\n def takeBreath(self):\n super().takeBreath()\n print('Iam a programmer and breathing++.')\n a = 0\n\n\n<mask token>\np.takeBreath()\n<mask token>\ne.takeBreath()\n<mask token>\npr.takeBreath()\n",
"step-5": "class Person:\r\n country = \"INDIA\"\r\n def __init__(self):\r\n print(\"its base constructor\")\r\n\r\n def takeBreath(self):\r\n print(\"Yes Iam breathing.\")\r\n\r\nclass Emp(Person): # inherits person\r\n def takeBreath(self):\r\n print(\"Yes Iam EMP and Iam also breathing.\")\r\n\r\nclass Prog(Emp): \r\n\r\n def __init__(self):\r\n super().__init__() # CALLS BASE CLASS CONTRUCTOR\r\n print(\"its child constructor\")\r\n\r\n def takeBreath(self):\r\n super().takeBreath() # calls previous class's method \r\n print(\"Iam a programmer and breathing++.\")\r\n a=0\r\n\r\np = Person()\r\np.takeBreath()\r\n\r\ne = Emp()\r\ne.takeBreath()\r\n\r\npr = Prog()\r\npr.takeBreath()",
"step-ids": [
4,
6,
8,
11,
13
]
}
|
[
4,
6,
8,
11,
13
] |
'''
IplNorm.py
Description:
Normalizing 0 - 255 initial fingerprint to a normalized image.
Using energy normalization.
Input:
-image
Output:
-norm_im
@author: Edoardo Foco
'''
import cv2
import numpy as np
def normalise(image):
dbl_image = image.astype(float)
# calculate the mean of the image.
mean = np.mean(dbl_image)
# converting numpy 8-bit image to 8- bit cv2.iplimage
iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]), cv2.cv.IPL_DEPTH_8U, 1)
cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 * image.shape[1])
# initializing 32-bit floating point iplimage
image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.IPL_DEPTH_32F,1)
# converting 8-bit unsigned integer image to 32-bit floating point image
cv2.cv.CvtScale(iplImage,image_32F)
# energy Normalization. Formula: image = image/mean(image)
cv2.cv.ConvertScale(image_32F, image_32F, (1/mean), 0);
# re-converting to numpy image
norm_im = np.asarray(image_32F[:,:])
return norm_im
|
normal
|
{
"blob_id": "f51d85ff352d9c84a8ded29ad94b24ca6dda46ad",
"index": 7593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalise(image):\n dbl_image = image.astype(float)\n mean = np.mean(dbl_image)\n iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),\n cv2.cv.IPL_DEPTH_8U, 1)\n cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 *\n image.shape[1])\n image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.\n IPL_DEPTH_32F, 1)\n cv2.cv.CvtScale(iplImage, image_32F)\n cv2.cv.ConvertScale(image_32F, image_32F, 1 / mean, 0)\n norm_im = np.asarray(image_32F[:, :])\n return norm_im\n",
"step-3": "<mask token>\nimport cv2\nimport numpy as np\n\n\ndef normalise(image):\n dbl_image = image.astype(float)\n mean = np.mean(dbl_image)\n iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),\n cv2.cv.IPL_DEPTH_8U, 1)\n cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 *\n image.shape[1])\n image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.\n IPL_DEPTH_32F, 1)\n cv2.cv.CvtScale(iplImage, image_32F)\n cv2.cv.ConvertScale(image_32F, image_32F, 1 / mean, 0)\n norm_im = np.asarray(image_32F[:, :])\n return norm_im\n",
"step-4": "\n'''\nIplNorm.py\nDescription: \n Normalizing 0 - 255 initial fingerprint to a normalized image.\n Using energy normalization.\n \n Input:\n -image\n \n Output:\n -norm_im\n@author: Edoardo Foco\n'''\n\nimport cv2\nimport numpy as np\n\ndef normalise(image):\n \n dbl_image = image.astype(float)\n # calculate the mean of the image.\n mean = np.mean(dbl_image)\n \n # converting numpy 8-bit image to 8- bit cv2.iplimage\n iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]), cv2.cv.IPL_DEPTH_8U, 1)\n cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 * image.shape[1])\n \n # initializing 32-bit floating point iplimage\n image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.IPL_DEPTH_32F,1)\n \n # converting 8-bit unsigned integer image to 32-bit floating point image\n cv2.cv.CvtScale(iplImage,image_32F)\n \n # energy Normalization. Formula: image = image/mean(image)\n cv2.cv.ConvertScale(image_32F, image_32F, (1/mean), 0);\n \n # re-converting to numpy image\n norm_im = np.asarray(image_32F[:,:])\n \n return norm_im",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.2.6 on 2020-05-27 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pancar', '0006_auto_20200526_1058'),
]
operations = [
migrations.AlterField(
model_name='process',
name='price',
field=models.DecimalField(decimal_places=1, max_digits=5, null=True),
),
]
|
normal
|
{
"blob_id": "316a34bbc2b3e3c818ef837f51bc1f86863ea59a",
"index": 2473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('pancar', '0006_auto_20200526_1058')]\n operations = [migrations.AlterField(model_name='process', name='price',\n field=models.DecimalField(decimal_places=1, max_digits=5, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('pancar', '0006_auto_20200526_1058')]\n operations = [migrations.AlterField(model_name='process', name='price',\n field=models.DecimalField(decimal_places=1, max_digits=5, null=True))]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-05-27 19:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pancar', '0006_auto_20200526_1058'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='process',\n name='price',\n field=models.DecimalField(decimal_places=1, max_digits=5, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""
The main service module
MIT License
Copyright (c) 2017-2020, Leo Moll
"""
# -- Imports ------------------------------------------------
from resources.lib.service import MediathekViewService
# -- Main Code ----------------------------------------------
if __name__ == '__main__':
SERVICE = MediathekViewService()
SERVICE.init()
SERVICE.run()
SERVICE.exit()
del SERVICE
|
normal
|
{
"blob_id": "e769e930ab8f0356116679bc38a09b83886eb8f6",
"index": 4003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n SERVICE = MediathekViewService()\n SERVICE.init()\n SERVICE.run()\n SERVICE.exit()\n del SERVICE\n",
"step-3": "<mask token>\nfrom resources.lib.service import MediathekViewService\nif __name__ == '__main__':\n SERVICE = MediathekViewService()\n SERVICE.init()\n SERVICE.run()\n SERVICE.exit()\n del SERVICE\n",
"step-4": "# -*- coding: utf-8 -*-\n# SPDX-License-Identifier: MIT\n\"\"\"\nThe main service module\n\nMIT License\n\nCopyright (c) 2017-2020, Leo Moll\n\"\"\"\n\n\n\n# -- Imports ------------------------------------------------\nfrom resources.lib.service import MediathekViewService\n\n# -- Main Code ----------------------------------------------\nif __name__ == '__main__':\n SERVICE = MediathekViewService()\n SERVICE.init()\n SERVICE.run()\n SERVICE.exit()\n del SERVICE\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Exercise 3: Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message.
# If the score is between 0.0 and 1.0, print a grade using the following table:
# Score Grade
# >= 0.9 A
# >= 0.8 B
# >= 0.7 C
# >= 0.6 D
# < 0.6 F
# Vinayak Nayak
# 27th December 2018
# 12:30 pm
try:
i = float(input("Enter the score : "))
if(i > 1 or i < 0):
print("Entered score isn't valid.")
else:
if (i < 0.6):
print("Grade: F")
elif (i < 0.7):
print("Grade: D")
elif (i < 0.8):
print("Grade: C")
elif (i < 0.9):
print("Grade: B")
elif (i <= 1.0):
print("Grade: A")
except Exception as e:
print(str(e))
|
normal
|
{
"blob_id": "6f253da5dc1caa504a3a8aadae7bce6537b5c8c6",
"index": 6237,
"step-1": "<mask token>\n",
"step-2": "try:\n i = float(input('Enter the score : '))\n if i > 1 or i < 0:\n print(\"Entered score isn't valid.\")\n elif i < 0.6:\n print('Grade: F')\n elif i < 0.7:\n print('Grade: D')\n elif i < 0.8:\n print('Grade: C')\n elif i < 0.9:\n print('Grade: B')\n elif i <= 1.0:\n print('Grade: A')\nexcept Exception as e:\n print(str(e))\n",
"step-3": "# Exercise 3: Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message.\n# If the score is between 0.0 and 1.0, print a grade using the following table:\n# Score Grade\n# >= 0.9 A\n# >= 0.8 B\n# >= 0.7 C\n# >= 0.6 D\n# < 0.6 F\n\n# Vinayak Nayak\n# 27th December 2018\n# 12:30 pm\n\ntry:\n i = float(input(\"Enter the score : \"))\n\n if(i > 1 or i < 0):\n print(\"Entered score isn't valid.\")\n else:\n if (i < 0.6):\n print(\"Grade: F\")\n\n elif (i < 0.7):\n print(\"Grade: D\")\n\n elif (i < 0.8):\n print(\"Grade: C\")\n\n elif (i < 0.9):\n print(\"Grade: B\")\n\n elif (i <= 1.0):\n print(\"Grade: A\")\n\nexcept Exception as e:\n print(str(e))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
def paragraph_spacing():
doc = SimpleDocTemplate("paragraph_spacing.pdf", pagesize=letter)
styles = getSampleStyleSheet()
#Mengahasilkan spasi antar paragraf sehinga tidak diperlukan <br/>
styles["Normal"].spaceBefore = 10
styles["Normal"].spaceAfter = 10
flowables = []
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles["Normal"])
flowables.append(para)
text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
para = Paragraph(text, style=styles["Normal"])
flowables.append(para)
doc.build(flowables)
if __name__ == "__main__":
paragraph_spacing()
|
normal
|
{
"blob_id": "d79e65b7aa09066230dec1a472f4535dff4123b5",
"index": 4217,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\nif __name__ == '__main__':\n paragraph_spacing()\n",
"step-4": "from reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\nif __name__ == '__main__':\n paragraph_spacing()\n",
"step-5": "from reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate(\"paragraph_spacing.pdf\", pagesize=letter)\n\n styles = getSampleStyleSheet()\n #Mengahasilkan spasi antar paragraf sehinga tidak diperlukan <br/>\n styles[\"Normal\"].spaceBefore = 10\n styles[\"Normal\"].spaceAfter = 10\n\n flowables = []\n\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles[\"Normal\"])\n flowables.append(para)\n\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles[\"Normal\"])\n flowables.append(para)\n\n doc.build(flowables)\n\n\nif __name__ == \"__main__\":\n paragraph_spacing()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def calcLuckyNumber(x):
resultSet = set()
for i in range(30):
for j in range(30):
for k in range(30):
number = pow(3, i) * pow(5, j) * pow(7, k)
if number > 1 and number <= x:
resultSet.add(number)
return resultSet
x = input("input number: ")
if x != '':
x = int(x)
if x > 0:
result = calcLuckyNumber(x)
print(len(result))
|
normal
|
{
"blob_id": "49a9fb43f3651d28d3ffac5e33d10c428afd08fd",
"index": 6072,
"step-1": "<mask token>\n",
"step-2": "def calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n return resultSet\n\n\n<mask token>\n",
"step-3": "def calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n return resultSet\n\n\n<mask token>\nif x != '':\n x = int(x)\n if x > 0:\n result = calcLuckyNumber(x)\n print(len(result))\n",
"step-4": "def calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n return resultSet\n\n\nx = input('input number: ')\nif x != '':\n x = int(x)\n if x > 0:\n result = calcLuckyNumber(x)\n print(len(result))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef calcLuckyNumber(x):\n resultSet = set()\n for i in range(30):\n for j in range(30):\n for k in range(30):\n number = pow(3, i) * pow(5, j) * pow(7, k)\n if number > 1 and number <= x:\n resultSet.add(number)\n\n return resultSet\n\nx = input(\"input number: \")\nif x != '':\n x = int(x)\n if x > 0:\n result = calcLuckyNumber(x)\n print(len(result))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import socket
import json
import numpy as np
"""TCP client used to communicate with the Unity Application"""
class TCP:
def __init__(self, sock = None):
# Create a TCP socket
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
def connect(self, host, port):
server_address = (host, port)
print('connecting to {} port {}'.format(*server_address))
self.sock.connect(server_address)
def send(self, value, convergence=False):
"""Send one value (distortion gain) to the server"""
# dump to json format
data = json.dumps(dict({"gain" : value, "convergence" : convergence})).encode()
print("Sending value {} as data {}".format(value, data))
self.sock.sendall(data)
def send2(self, radius, gain, convergence=False):
"""Send two values (distortion gain, and radius) to the server"""
# dump to json format
data = json.dumps(dict({"gain" : gain, "radius": radius, "convergence" : convergence})).encode()
print("Sending value ({}, {}) as data {}".format(radius, gain, data))
self.sock.sendall(data)
def receive(self):
# Convert bytes to float
data = self.sock.recv(1024)
print("Received: {}".format(data))
value = json.loads(data)
return value
def close(self):
print("Closing socket")
self.sock.close()
|
normal
|
{
"blob_id": "cc66dcd34115e72479953ca24f4b2eaeb52cf313",
"index": 7747,
"step-1": "<mask token>\n\n\nclass TCP:\n <mask token>\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n <mask token>\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-2": "<mask token>\n\n\nclass TCP:\n <mask token>\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n data = json.dumps(dict({'gain': gain, 'radius': radius,\n 'convergence': convergence})).encode()\n print('Sending value ({}, {}) as data {}'.format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-3": "<mask token>\n\n\nclass TCP:\n\n def __init__(self, sock=None):\n if sock is None:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n data = json.dumps(dict({'gain': gain, 'radius': radius,\n 'convergence': convergence})).encode()\n print('Sending value ({}, {}) as data {}'.format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-4": "import socket\nimport json\nimport numpy as np\n<mask token>\n\n\nclass TCP:\n\n def __init__(self, sock=None):\n if sock is None:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n data = json.dumps(dict({'gain': gain, 'radius': radius,\n 'convergence': convergence})).encode()\n print('Sending value ({}, {}) as data {}'.format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-5": "import socket\nimport json\nimport numpy as np\n\n\"\"\"TCP client used to communicate with the Unity Application\"\"\"\n\nclass TCP:\n def __init__(self, sock = None):\n # Create a TCP socket\n if sock is None:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n server_address = (host, port)\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n # dump to json format\n data = json.dumps(dict({\"gain\" : value, \"convergence\" : convergence})).encode()\n print(\"Sending value {} as data {}\".format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n # dump to json format\n data = json.dumps(dict({\"gain\" : gain, \"radius\": radius, \"convergence\" : convergence})).encode()\n print(\"Sending value ({}, {}) as data {}\".format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n # Convert bytes to float\n data = self.sock.recv(1024)\n print(\"Received: {}\".format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print(\"Closing socket\")\n self.sock.close()",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
__author__ = 'samar'
import mv_details
import product
|
normal
|
{
"blob_id": "7ac53779a98b6e4b236b1e81742163d2c610a274",
"index": 4556,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'samar'\n<mask token>\n",
"step-3": "__author__ = 'samar'\nimport mv_details\nimport product\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import http.cookies
import json
import os
import itertools
import types
from framework import helpers
from framework import security
class Model:
    """Manages the information received by the client."""

    def __init__(self):
        """Copy os.environ into the instance namespace under camelCase names.

        'SERVER_NAME' becomes 'serverName': lower-case the first letter,
        title-case the remaining words, and drop the underscores.
        """
        self.__dict__.update({
            key[0].lower() + key.title().replace('_', '')[1:]: value
            for key, value in os.environ.items()
        })

    @property
    def form(self):
        """Contains the data sent from the client."""
        return security.get_field_storage()

    @property
    def cookie(self):
        """The client cookie."""
        return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))

    @property
    def url(self):
        """The url of the request ('' when neither CGI variable is set)."""
        return os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI') or ''

    @property
    def serverProtocol(self):
        """The server protocol, defaulting to 'HTTP/1.1'."""
        return os.environ.get('SERVER_PROTOCOL') or 'HTTP/1.1'

    @property
    def protocol(self):
        """The protocol (HTTP or HTTPS)."""
        return helpers.get_protocol()

    @property
    def ip(self):
        """The ip of the client."""
        return os.environ.get('REMOTE_ADDR')
|
normal
|
{
"blob_id": "7f21ab8d332d169226ef17276abbdd373e3a62c2",
"index": 8544,
"step-1": "<mask token>\n\n\nclass Model:\n <mask token>\n <mask token>\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-2": "<mask token>\n\n\nclass Model:\n <mask token>\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(itertools.starmap(lambda key, value: (key[0].\n lower() + key.title().replace('_', '')[1:], value), os.environ.\n items()))\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-3": "<mask token>\n\n\nclass Model:\n \"\"\"Manages the information received by the client\"\"\"\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(itertools.starmap(lambda key, value: (key[0].\n lower() + key.title().replace('_', '')[1:], value), os.environ.\n items()))\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-4": "import http.cookies\nimport json\nimport os\nimport itertools\nimport types\nfrom framework import helpers\nfrom framework import security\n\n\nclass Model:\n \"\"\"Manages the information received by the client\"\"\"\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(itertools.starmap(lambda key, value: (key[0].\n lower() + key.title().replace('_', '')[1:], value), os.environ.\n items()))\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO') or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n",
"step-5": "import http.cookies\nimport json\nimport os\nimport itertools\nimport types\n\nfrom framework import helpers\nfrom framework import security\n\n\nclass Model:\n \"\"\"Manages the information received by the client\"\"\"\n\n def __init__(self):\n \"\"\"Puth the os.environ dict into the namespace\"\"\"\n self.__dict__.update(\n itertools.starmap(\n lambda key, value: (\n key[0].lower() + # upper case the first letter and add\n key.title() # title case all text\n .replace('_', '') # remove undersore\n [1:] # all text without the first char\n , value\n ) #lambda\n ,os.environ.items()\n ) #itertools.starmap\n ) #update\n\n @property\n def form(self):\n \"\"\"Contains the data send from the client.\"\"\"\n return security.get_field_storage()\n\n @property\n def cookie(self):\n \"\"\"The client cookie\"\"\"\n return http.cookies.SimpleCookie(os.environ.get('HTTP_COOKIE'))\n\n @property\n def url(self):\n \"\"\"The url of request\"\"\"\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''\n\n @property\n def serverProtocol(self):\n \"\"\"The server protocol\"\"\"\n serverProtocol = os.environ.get('SERVER_PROTOCOL')\n return serverProtocol if serverProtocol else 'HTTP/1.1'\n\n @property\n def protocol(self):\n \"\"\"Te protocol (HTTP or HTTPS)\"\"\"\n return helpers.get_protocol()\n\n @property\n def ip(self):\n \"\"\"The ip of the client\"\"\"\n return os.environ.get('REMOTE_ADDR')\n ",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
import json
import math
import rospy
import sys
import RPi.GPIO as GPIO
from std_msgs.msg import Float32
from geometry_msgs.msg import Point32
from time import sleep
# Servo pulse widths for the 45/90/180 degree positions.
# Presumably milliseconds, since t_per_cycle below is in ms — TODO confirm
# against the servo datasheet.
d_45 = 1.0
d_90 = 1.5
d_180 = 2.5
# PWM carrier frequency (Hz) and the resulting period in milliseconds.
frequency = 50.0
t_per_cycle = (1.0 / frequency) * 1000.0
# Convert each pulse width to a duty cycle percentage of the period.
duty_45 = (d_45 / t_per_cycle) * 100.0
duty_90 = (d_90 / t_per_cycle) * 100.0
duty_180 = (d_180 / t_per_cycle) * 100.0
# X/Y gear geometry: a 20-tooth gear of radius 2.25; d is the arc length per
# tooth, and cm_theta converts linear travel (cm) to servo degrees
# (18 degrees per tooth).
radius = 2.25
cir = 2.0 * radius * math.pi
d = cir / 20.0
cm_theta = 18.0 / d
# Z-axis gear geometry: 10-tooth gear of radius 1.0 (36 degrees per tooth).
z_radius = 1.0
z_cir = 2.0 * z_radius * math.pi
z_d = z_cir / 10.0
# NOTE(review): divides by `d` (the X/Y tooth arc), not `z_d` — z_d is
# otherwise unused. Possibly intentional, but verify.
z_cm_theta = 36.0 / d
class Servo_node:
    """ROS node driving three hobby servos (x, y, z) via Raspberry Pi PWM.

    Subscribes to per-axis angle topics and a Cartesian position topic,
    converting positions into servo angles using the gear constants defined
    at module level.
    """

    def __init__(self):
        rospy.init_node('servo_node', anonymous=False)
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        # BCM pin numbers — adjust for your own wiring.
        motor_x = 13
        motor_y = 12
        motor_z = 20

        GPIO.setup(motor_x, GPIO.OUT)
        GPIO.setup(motor_y, GPIO.OUT)
        GPIO.setup(motor_z, GPIO.OUT)
        # Usable pulse-width ranges noted by the original author (ms):
        # x: 0.75-2.75, y: 2-3, z: 0.8-1.8.
        self.pwm_x = GPIO.PWM(motor_x, frequency)
        self.pwm_y = GPIO.PWM(motor_y, frequency)
        self.pwm_z = GPIO.PWM(motor_z, frequency)
        # Drive each servo to its start position, then set duty to 0 so the
        # servo stops receiving pulses (prevents hold jitter).
        self.pwm_z.start(duty_45)
        sleep(0.5)
        self.pwm_z.ChangeDutyCycle(0)

        self.pwm_x.start(duty_180)
        sleep(0.5)
        self.pwm_x.ChangeDutyCycle(0)

        self.pwm_y.start(duty_45)
        sleep(0.5)
        self.pwm_y.ChangeDutyCycle(0)
        # Create the per-axis publishers ONCE here instead of inside the
        # set_coordinate callback (the original did the latter): publisher
        # connections take time to establish, so messages published
        # immediately after construction can be silently dropped.
        self.x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)
        self.y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)
        self.z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)
        # Per-axis angle command topics.
        self.sub_x = rospy.Subscriber("/servo_ctrl/s1", Float32, self.set_servo_x_angle)
        self.sub_y = rospy.Subscriber("/servo_ctrl/s2", Float32, self.set_servo_y_angle)
        self.sub_z = rospy.Subscriber("/servo_ctrl/s3", Float32, self.set_servo_z_angle)
        # Cartesian position commands.
        self.pos_sub = rospy.Subscriber("/servo_ctrl/pos", Point32, self.set_coordinate)

    def set_servo_x_angle(self, msg):
        """Move the x servo to msg.data degrees, then stop the pulse train."""
        rospy.loginfo("setting servo")
        # saturate_input converts the angle to a clamped duty cycle.
        self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))
        sleep(1)
        self.pwm_x.ChangeDutyCycle(0)
        sleep(0.5)

    def set_servo_y_angle(self, msg):
        """Move the y servo to msg.data degrees, then stop the pulse train."""
        rospy.loginfo("setting servo")
        self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))
        sleep(1)
        self.pwm_y.ChangeDutyCycle(0)
        sleep(0.5)

    def set_servo_z_angle(self, msg):
        """Move the z servo to msg.data degrees, then stop the pulse train."""
        rospy.loginfo("setting servo")
        self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))
        sleep(1)
        self.pwm_z.ChangeDutyCycle(0)
        sleep(0.5)

    def set_coordinate(self, msg):
        """Convert a Cartesian target into per-servo angle commands.

        A coordinate of -1 on an axis means "release": that servo's pulse
        train is stopped instead of being commanded to an angle.
        """
        rospy.loginfo("setting position")
        # Offsets calibrate the home position so the touch probe starts at
        # (0, 0); values depend on the individual motors.
        x_offset = 0
        y_offset = -5
        z_offset = 0
        x = msg.x
        y = msg.y
        z = msg.z
        x_angle = 180 - x * cm_theta + x_offset
        y_angle = 45 + y * cm_theta + y_offset
        # z appears to be measured down from 1.5 — TODO confirm the datum.
        z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset

        if x == -1 or y == -1 or z == -1:
            if x == -1:
                self.pwm_x.ChangeDutyCycle(0)
            else:
                self.x_pub.publish(Float32(x_angle))
            if y == -1:
                self.pwm_y.ChangeDutyCycle(0)
            else:
                self.y_pub.publish(Float32(y_angle))
            if z == -1:
                self.pwm_z.ChangeDutyCycle(0)
            else:
                self.z_pub.publish(Float32(z_angle))
        elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:
            # Target lies inside the reachable workspace.
            self.x_pub.publish(Float32(x_angle))
            self.y_pub.publish(Float32(y_angle))
            self.z_pub.publish(Float32(z_angle))

    def saturate_input(self, angle):
        """Convert an angle in degrees to a duty cycle, clamped to [0, 100]."""
        print(angle)
        pw_per_deg = (duty_180 - duty_90) / 90
        duty = pw_per_deg * (angle - 45) + duty_45
        print(duty)
        return max(min(duty, 100), 0)
def main_loop():
    """Keep the node alive, sleeping at 10 Hz until ROS shuts down."""
    idle_rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        idle_rate.sleep()
# Entry point: constructing the node homes the servos and registers the
# subscribers; main_loop then keeps the process alive for callbacks.
if __name__ == "__main__":
    servo = Servo_node()
    main_loop()
|
normal
|
{
"blob_id": "95845aeb47e0d2c579739767ece35f4134564d98",
"index": 7717,
"step-1": "<mask token>\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + 
y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + 
y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\ndef main_loop():\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\n<mask token>\n",
"step-3": "<mask token>\nd_45 = 1.0\nd_90 = 1.5\nd_180 = 2.5\nfrequency = 50.0\nt_per_cycle = 1.0 / frequency * 1000.0\nduty_45 = d_45 / t_per_cycle * 100.0\nduty_90 = d_90 / t_per_cycle * 100.0\nduty_180 = d_180 / t_per_cycle * 100.0\nradius = 2.25\ncir = 2.0 * radius * math.pi\nd = cir / 20.0\ncm_theta = 18.0 / d\nz_radius = 1.0\nz_cir = 2.0 * z_radius * math.pi\nz_d = z_cir / 10.0\nz_cm_theta = 36.0 / d\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n 
rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\ndef main_loop():\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\nif __name__ == '__main__':\n servo = Servo_node()\n main_loop()\n",
"step-4": "import json\nimport math\nimport rospy\nimport sys\nimport RPi.GPIO as GPIO\nfrom std_msgs.msg import Float32\nfrom geometry_msgs.msg import Point32\nfrom time import sleep\nd_45 = 1.0\nd_90 = 1.5\nd_180 = 2.5\nfrequency = 50.0\nt_per_cycle = 1.0 / frequency * 1000.0\nduty_45 = d_45 / t_per_cycle * 100.0\nduty_90 = d_90 / t_per_cycle * 100.0\nduty_180 = d_180 / t_per_cycle * 100.0\nradius = 2.25\ncir = 2.0 * radius * math.pi\nd = cir / 20.0\ncm_theta = 18.0 / d\nz_radius = 1.0\nz_cir = 2.0 * z_radius * math.pi\nz_d = z_cir / 10.0\nz_cm_theta = 36.0 / d\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting 
servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\ndef main_loop():\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\nif __name__ == '__main__':\n servo = Servo_node()\n main_loop()\n",
"step-5": "import json\nimport math\nimport rospy\nimport sys\nimport RPi.GPIO as GPIO\nfrom std_msgs.msg import Float32\nfrom geometry_msgs.msg import Point32\nfrom time import sleep\n\n#pulse width of difference rotations\nd_45 = 1.0\nd_90 = 1.5\nd_180 = 2.5\n\nfrequency = 50.0\nt_per_cycle = (1.0 / frequency) * 1000.0\n\n#convert to duty cycles\nduty_45 = (d_45 / t_per_cycle) * 100.0\nduty_90 = (d_90 / t_per_cycle) * 100.0\nduty_180 = (d_180 / t_per_cycle) * 100.0\n\n#gear spec\nradius = 2.25\ncir = 2.0 * radius * math.pi\nd = cir / 20.0\ncm_theta = 18.0 / d\n\nz_radius = 1.0\nz_cir = 2.0 * z_radius * math.pi\nz_d = z_cir / 10.0\nz_cm_theta = 36.0 / d\n\n\n\nclass Servo_node:\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n # Setting up for pin 12. Make sure to adjust for your own needs\n motor_x = 13\n motor_y = 12\n motor_z = 20\n\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n # 0.75-2.75\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n # 2-3\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n # 0.8-1.8\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n #set start position to (0,0)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n #topic takes angle as message\n self.sub_x = rospy.Subscriber(\"/servo_ctrl/s1\", Float32, self.set_servo_x_angle)\n self.sub_y = rospy.Subscriber(\"/servo_ctrl/s2\", Float32, self.set_servo_y_angle)\n self.sub_z = rospy.Subscriber(\"/servo_ctrl/s3\", Float32, self.set_servo_z_angle)\n #topic for position commands\n self.pos_sub = rospy.Subscriber(\"/servo_ctrl/pos\", Point32, self.set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo(\"setting servo\")\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))# Note tha this does not 
correspond to angle\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo(\"setting servo\")\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo(\"setting servo\")\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n def set_coordinate(self, msg):\n #conversion between coordinate to motor angles\n rospy.loginfo(\"setting position\")\n #correction for motors\n #offset added to make sure the touch probe is at (0,0) initially\n #may need to change depends on your motor\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n # z_pub.publish(Float32(45))\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n #conversion from angle to duty cycles\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90;\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty,100),0)\n\n\ndef main_loop():\n rate = rospy.Rate(10) # 10Hz\n while not 
rospy.is_shutdown():\n rate.sleep()\n\nif __name__ == \"__main__\":\n servo = Servo_node()\n main_loop()\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.sources.models.mixins.page_numbers import PageNumbersMixin
from apps.sources.models.source import Source
# Django choices for Piece.type; currently only essays are supported.
PIECE_TYPES = (('essay', 'Essay'),)
# Maximum stored length of the Piece.type value.
TYPE_MAX_LENGTH: int = 10
class Piece(Source, PageNumbersMixin):
    """A piece (e.g., essay)."""

    # Kind of piece; defaults to the first (and currently only) choice.
    type = models.CharField(
        verbose_name=_('piece type'),
        max_length=TYPE_MAX_LENGTH,
        choices=PIECE_TYPES,
        default=PIECE_TYPES[0][0],
    )

    def __html__(self) -> str:
        """Return the piece's citation HTML string."""
        date_string = self.date.string if self.date else ''
        return self.components_to_html([
            self.attributee_html,
            f'"{self.linked_title}"',
            date_string,
        ])
|
normal
|
{
"blob_id": "30c24b9a4738c1952fc5d36a4bc36d8d3576ed3b",
"index": 7201,
"step-1": "<mask token>\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-2": "<mask token>\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-3": "<mask token>\nPIECE_TYPES = ('essay', 'Essay'),\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-4": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.sources.models.mixins.page_numbers import PageNumbersMixin\nfrom apps.sources.models.source import Source\nPIECE_TYPES = ('essay', 'Essay'),\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n type = models.CharField(verbose_name=_('piece type'), max_length=\n TYPE_MAX_LENGTH, choices=PIECE_TYPES, default=PIECE_TYPES[0][0])\n\n def __html__(self) ->str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [self.attributee_html, f'\"{self.linked_title}\"', self.\n date.string if self.date else '']\n return self.components_to_html(components)\n",
"step-5": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.sources.models.mixins.page_numbers import PageNumbersMixin\nfrom apps.sources.models.source import Source\n\nPIECE_TYPES = (('essay', 'Essay'),)\nTYPE_MAX_LENGTH: int = 10\n\n\nclass Piece(Source, PageNumbersMixin):\n \"\"\"A piece (e.g., essay).\"\"\"\n\n type = models.CharField(\n verbose_name=_('piece type'),\n max_length=TYPE_MAX_LENGTH,\n choices=PIECE_TYPES,\n default=PIECE_TYPES[0][0],\n )\n\n def __html__(self) -> str:\n \"\"\"Return the piece's citation HTML string.\"\"\"\n components = [\n self.attributee_html,\n f'\"{self.linked_title}\"',\n self.date.string if self.date else '',\n ]\n return self.components_to_html(components)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class Solution(object):
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # A degenerate grid can only contain the empty word.
        if not board or not board[0]:
            return not word
        self.length = len(word)
        rows, cols = len(board), len(board[0])
        visited = [[False] * cols for _ in range(rows)]

        def search(r, c, depth=0):
            # All characters matched: the word exists.
            if depth == self.length:
                return True
            found = False
            in_bounds = 0 <= r < rows and 0 <= c < cols
            if in_bounds and board[r][c] == word[depth] and not visited[r][c]:
                visited[r][c] = True
                # Explore the four neighbours; un-mark on failure (backtrack).
                found = (search(r - 1, c, depth + 1)
                         or search(r + 1, c, depth + 1)
                         or search(r, c - 1, depth + 1)
                         or search(r, c + 1, depth + 1))
                if not found:
                    visited[r][c] = False
            return found

        return any(search(r, c, 0) for r in range(rows) for c in range(cols))
# Python DFS solution (visited cells tracked in a set, word consumed by slicing)
class Solution(object):
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Fix 1: guard degenerate inputs. The original crashed with
        # IndexError on an empty board (board[0]) and on an empty word
        # (word[0]). An empty board contains only the empty word; the
        # empty word is trivially present in any non-empty board.
        if not board or not board[0]:
            return not word
        if not word:
            return True

        m, n = len(board), len(board[0])

        # Fix 2: no mutable default argument (the original had
        # `visited=set()`, a classic Python pitfall); visited is always
        # passed explicitly instead.
        def dfs(i, j, remaining, visited):
            """Return True if `remaining` can be matched starting adjacent
            to (i, j), given the cells already used in `visited`."""
            if not remaining:
                return True
            for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                # Only in-bounds neighbours not already on the path.
                if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:
                    # The neighbour matches the next character of the word.
                    if board[ni][nj] == remaining[0]:
                        # If any direction succeeds, propagate True at once.
                        if dfs(ni, nj, remaining[1:], visited | {(ni, nj)}):
                            return True
            return False

        for i in range(m):
            for j in range(n):
                # The first character matches: descend into the search.
                if board[i][j] == word[0]:
                    if dfs(i, j, word[1:], {(i, j)}):
                        return True
        return False
|
normal
|
{
"blob_id": "9b8db3407313a3e39d429b7c10897fc447fcdc27",
"index": 1337,
"step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n\n def dfs(i, j, word, visited=set()):\n if not word:\n return True\n for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):\n if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:\n if board[ni][nj] == word[0]:\n if dfs(ni, nj, word[1:], visited | {(ni, nj)}):\n return True\n return False\n m, n = len(board), len(board[0])\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n if dfs(i, j, word[1:], set([(i, j)])):\n return True\n return False\n",
"step-3": "class Solution(object):\n <mask token>\n\n\nclass Solution(object):\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n\n def dfs(i, j, word, visited=set()):\n if not word:\n return True\n for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):\n if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:\n if board[ni][nj] == word[0]:\n if dfs(ni, nj, word[1:], visited | {(ni, nj)}):\n return True\n return False\n m, n = len(board), len(board[0])\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n if dfs(i, j, word[1:], set([(i, j)])):\n return True\n return False\n",
"step-4": "class Solution(object):\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n if not board or not board[0]:\n return not word\n self.length = len(word)\n\n def hasPathCore(row, col, depth=0):\n if self.length == depth:\n return True\n hasPath = False\n if 0 <= row and row < len(board) and 0 <= col and col < len(board\n [0]) and board[row][col] == word[depth] and not visited[row][\n col]:\n visited[row][col] = True\n up = hasPathCore(row - 1, col, depth + 1)\n down = hasPathCore(row + 1, col, depth + 1)\n left = hasPathCore(row, col - 1, depth + 1)\n right = hasPathCore(row, col + 1, depth + 1)\n hasPath = up or down or left or right\n if not hasPath:\n visited[row][col] = False\n return hasPath\n visited = [([False] * len(board[0])) for _ in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[0])):\n if hasPathCore(i, j, 0):\n return True\n return False\n\n\nclass Solution(object):\n\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n\n def dfs(i, j, word, visited=set()):\n if not word:\n return True\n for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):\n if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:\n if board[ni][nj] == word[0]:\n if dfs(ni, nj, word[1:], visited | {(ni, nj)}):\n return True\n return False\n m, n = len(board), len(board[0])\n for i in range(m):\n for j in range(n):\n if board[i][j] == word[0]:\n if dfs(i, j, word[1:], set([(i, j)])):\n return True\n return False\n",
"step-5": "class Solution(object):\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n if not board or not board[0]: return not word\n self.length = len(word)\n def hasPathCore(row, col, depth=0):\n if self.length == depth:\n return True\n hasPath = False\n if 0 <= row and row < len(board) and \\\n 0 <= col and col < len(board[0]) and \\\n board[row][col] == word[depth] and \\\n not visited[row][col]:\n visited[row][col] = True\n up = hasPathCore(row - 1, col, depth + 1)\n down = hasPathCore(row + 1, col, depth + 1)\n left = hasPathCore(row, col - 1, depth + 1)\n right = hasPathCore(row, col + 1, depth + 1)\n hasPath = up or down or left or right\n if not hasPath:\n visited[row][col] = False\n return hasPath\n \n visited = [[False] * len(board[0]) for _ in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[0])):\n if hasPathCore(i, j, 0): return True\n return False\n\n# python, dfs解法\nclass Solution(object):\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n def dfs(i, j, word, visited=set()):\n # Base case\n if not word:\n return True\n for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):\n # 搜索相邻的,且没有被访问过的位置\n if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:\n # 这个位置字符和word开头对上了\n if board[ni][nj] == word[0]:\n # 在下一层中,找到了一个成功的方向,即刻返回true\n if dfs(ni, nj, word[1:], visited | {(ni, nj)}):\n return True\n return False\n\n m, n = len(board), len(board[0])\n for i in range(m):\n for j in range(n):\n # 开头对上了,进入下一层寻找\n if board[i][j] == word[0]:\n # 剩下的依然匹配,则返回true\n if dfs(i, j, word[1:], set([(i, j)])):\n return True\n return False\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 2.2.3 on 2019-07-18 06:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('juchu', '0003_auto_20190718_1500'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='product',
),
migrations.RemoveField(
model_name='order',
name='quantity',
),
migrations.CreateModel(
name='OrderProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='juchu.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='juchu.Product')),
],
),
]
|
normal
|
{
"blob_id": "b0174b6f6c33434ff9b5cdb59531502899d8348a",
"index": 4262,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('juchu', '0003_auto_20190718_1500')]\n operations = [migrations.RemoveField(model_name='order', name='product'\n ), migrations.RemoveField(model_name='order', name='quantity'),\n migrations.CreateModel(name='OrderProduct', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('quantity', models.IntegerField(default=1)),\n ('order', models.ForeignKey(on_delete=django.db.models.deletion.\n PROTECT, to='juchu.Order')), ('product', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='juchu.Product'))])]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('juchu', '0003_auto_20190718_1500')]\n operations = [migrations.RemoveField(model_name='order', name='product'\n ), migrations.RemoveField(model_name='order', name='quantity'),\n migrations.CreateModel(name='OrderProduct', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('quantity', models.IntegerField(default=1)),\n ('order', models.ForeignKey(on_delete=django.db.models.deletion.\n PROTECT, to='juchu.Order')), ('product', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='juchu.Product'))])]\n",
"step-5": "# Generated by Django 2.2.3 on 2019-07-18 06:05\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('juchu', '0003_auto_20190718_1500'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='order',\r\n name='product',\r\n ),\r\n migrations.RemoveField(\r\n model_name='order',\r\n name='quantity',\r\n ),\r\n migrations.CreateModel(\r\n name='OrderProduct',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('quantity', models.IntegerField(default=1)),\r\n ('order', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='juchu.Order')),\r\n ('product', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='juchu.Product')),\r\n ],\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
##!/work/local/bin/python
##!/work/local/CDAT/bin/python
import sys,getopt
import matplotlib.pyplot as plt
def read():
x = []
y = []
for line in sys.stdin:
v1,v2 = line.split()[:2]
x.append(float(v1))
y.append(float(v2))
return x,y
#def plot(x,y):
def plot(x,y,xlabel,ylabel,title,fn):
fig = plt.figure( figsize=(6.0,6.0) )
ax = fig.add_subplot(111)
ax.grid(True)
if title:
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plot = ax.scatter( x, y, s=3, marker='o' )
mx = max(x)
mn = min(x)
plot = ax.plot( [mn,mx], [mn,mx] , 'r-')
if fn:
fname = fn
else:
fname = 'TMP_scat.png'
fig.savefig( fname, format='png' )
print 'WROTE --> %s' % fname
######################################
use = '''
Usage: %s
-h help
'''
if __name__ == '__main__':
def usage():
sys.stderr.write(use % sys.argv[0])
sys.exit(1)
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hx:y:o:t:')
except getopt.error:
usage()
fn = ''
x = 'X'
y = 'Y'
title = ''
for (opt,val) in opts:
if opt == '-x':
x = val
elif opt == '-y':
y = val
elif opt == '-t':
title = val
elif opt == '-o':
fn = val
else:
raise OptionError, opt
usage()
#if len(args) != 1:
# usage()
#fn = args[0]
xv,yv = read()
plot(xv,yv,x,y,title,fn)
|
normal
|
{
"blob_id": "b16ad4bae079159da7ef88b61081d7763d4ae9a0",
"index": 8312,
"step-1": "#!/usr/bin/env python\n##!/work/local/bin/python\n##!/work/local/CDAT/bin/python\n\nimport sys,getopt\nimport matplotlib.pyplot as plt\n\n\ndef read():\n\n x = []\n y = []\n for line in sys.stdin:\n v1,v2 = line.split()[:2]\n x.append(float(v1))\n y.append(float(v2))\n return x,y\n\n\n#def plot(x,y):\ndef plot(x,y,xlabel,ylabel,title,fn):\n\n fig = plt.figure( figsize=(6.0,6.0) )\n ax = fig.add_subplot(111)\n ax.grid(True)\n if title:\n ax.set_title(title)\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n plot = ax.scatter( x, y, s=3, marker='o' )\n\n mx = max(x)\n mn = min(x)\n plot = ax.plot( [mn,mx], [mn,mx] , 'r-')\n\n if fn:\n fname = fn\n else:\n fname = 'TMP_scat.png'\n fig.savefig( fname, format='png' )\n print 'WROTE --> %s' % fname\n\n\n\n######################################\nuse = '''\nUsage: %s \n\n -h help\n\n'''\nif __name__ == '__main__':\n\n def usage():\n sys.stderr.write(use % sys.argv[0])\n sys.exit(1)\n\n try:\n (opts, args) = getopt.getopt(sys.argv[1:], 'hx:y:o:t:')\n except getopt.error:\n usage()\n\n fn = ''\n x = 'X'\n y = 'Y'\n title = ''\n for (opt,val) in opts:\n if opt == '-x':\n x = val\n elif opt == '-y':\n y = val\n elif opt == '-t':\n title = val\n elif opt == '-o':\n fn = val\n else:\n raise OptionError, opt\n usage()\n\n #if len(args) != 1:\n # usage()\n #fn = args[0]\n\n xv,yv = read()\n plot(xv,yv,x,y,title,fn)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding: utf-8
"""
Styled object
=============
A :class:`~benker.styled.Styled` object contains a dictionary of styles.
It is mainly used for :class:`~benker.table.Table`, :class:`~benker.table.RowView`,
:class:`~benker.table.ColView`, and :class:`~benker.cell.Cell`.
"""
import pprint
class Styled(object):
"""
Styled object, like Table, Row, Column, or Cell objects.
A styled object stores user-defined styles: a dictionary of key-value pairs.
This values are useful to store some HTML-like styles (border-style,
border-width, border-color, vertical-align, text-align, etc.).
Of course, we are not tied to the HTML-like styles, you can use your
own list of styles.
.. note::
The style dictionary is always copied: in other words, key-value pairs
are copied but a shallow copy is done for the values (in general, it
is not a problem if you use non-mutable values like :class:`str`).
A styled object stores a nature: a way to distinguish the body cells,
from the header and the footer. The default value is ``None``, but you can
use "body", "header", "footer" or whatever is suitable for your needs.
This kind of information is in general not stored in the styles,
even if it is similar.
Tables can also have a *nature*, similar to HTML ``@class`` attribute,
you can use it do identify the styles to apply to your table.
.. note::
In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`
of two natures is done by keeping the first nature and
dropping the second one. In other words, the resulting nature is
the group of the most top-left nature of the merged cells.
"""
__slots__ = ('_styles', 'nature')
def __init__(self, styles, nature):
"""
Construct a styled object from a dictionary of styles.
:type styles: typing.Dict[str, str]
:param styles:
Dictionary of key-value pairs, where *keys* are the style names.
:type nature: str
:ivar nature:
Cell *nature* used to distinguish the body cells, from the header and the footer.
Table *nature* used to store a value similar to HTML ``@class`` attribute.
"""
#: Dictionary of key-value pairs, where *keys* are the style names.
self.styles = styles
#: Cell *nature* used to distinguish the body cells, from the header and the footer.
self.nature = nature
def __str__(self):
return str(self._styles)
def __repr__(self):
cls = self.__class__.__name__
items = pprint.pformat(self._styles)
nature = self.nature
return "<{cls}({items}, {nature!r})>".format(cls=cls, items=items, nature=nature)
@property
def styles(self):
""" Dictionary of styles: key-value pairs. """
return self._styles
@styles.setter
def styles(self, styles):
""" Setup the dictionary of styles (shallow copy of the items). """
# each cell owns it's own copy of the styles
self._styles = {} if styles is None else styles.copy()
|
normal
|
{
"blob_id": "8fa58791aae1352109b3bf7410d68bf5ae1d8cb7",
"index": 9559,
"step-1": "<mask token>\n\n\nclass Styled(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-2": "<mask token>\n\n\nclass Styled(object):\n <mask token>\n <mask token>\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n self.styles = styles\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-3": "<mask token>\n\n\nclass Styled(object):\n \"\"\"\n Styled object, like Table, Row, Column, or Cell objects.\n\n A styled object stores user-defined styles: a dictionary of key-value pairs.\n This values are useful to store some HTML-like styles (border-style,\n border-width, border-color, vertical-align, text-align, etc.).\n Of course, we are not tied to the HTML-like styles, you can use your\n own list of styles.\n\n .. note::\n\n The style dictionary is always copied: in other words, key-value pairs\n are copied but a shallow copy is done for the values (in general, it\n is not a problem if you use non-mutable values like :class:`str`).\n\n A styled object stores a nature: a way to distinguish the body cells,\n from the header and the footer. The default value is ``None``, but you can\n use \"body\", \"header\", \"footer\" or whatever is suitable for your needs.\n This kind of information is in general not stored in the styles,\n even if it is similar.\n\n Tables can also have a *nature*, similar to HTML ``@class`` attribute,\n you can use it do identify the styles to apply to your table.\n\n .. note::\n\n In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`\n of two natures is done by keeping the first nature and\n dropping the second one. 
In other words, the resulting nature is\n the group of the most top-left nature of the merged cells.\n\n \"\"\"\n __slots__ = '_styles', 'nature'\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n self.styles = styles\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-4": "<mask token>\nimport pprint\n\n\nclass Styled(object):\n \"\"\"\n Styled object, like Table, Row, Column, or Cell objects.\n\n A styled object stores user-defined styles: a dictionary of key-value pairs.\n This values are useful to store some HTML-like styles (border-style,\n border-width, border-color, vertical-align, text-align, etc.).\n Of course, we are not tied to the HTML-like styles, you can use your\n own list of styles.\n\n .. note::\n\n The style dictionary is always copied: in other words, key-value pairs\n are copied but a shallow copy is done for the values (in general, it\n is not a problem if you use non-mutable values like :class:`str`).\n\n A styled object stores a nature: a way to distinguish the body cells,\n from the header and the footer. The default value is ``None``, but you can\n use \"body\", \"header\", \"footer\" or whatever is suitable for your needs.\n This kind of information is in general not stored in the styles,\n even if it is similar.\n\n Tables can also have a *nature*, similar to HTML ``@class`` attribute,\n you can use it do identify the styles to apply to your table.\n\n .. note::\n\n In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`\n of two natures is done by keeping the first nature and\n dropping the second one. 
In other words, the resulting nature is\n the group of the most top-left nature of the merged cells.\n\n \"\"\"\n __slots__ = '_styles', 'nature'\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n self.styles = styles\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return '<{cls}({items}, {nature!r})>'.format(cls=cls, items=items,\n nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n self._styles = {} if styles is None else styles.copy()\n",
"step-5": "# coding: utf-8\n\"\"\"\nStyled object\n=============\n\nA :class:`~benker.styled.Styled` object contains a dictionary of styles.\n\nIt is mainly used for :class:`~benker.table.Table`, :class:`~benker.table.RowView`,\n:class:`~benker.table.ColView`, and :class:`~benker.cell.Cell`.\n\n\"\"\"\nimport pprint\n\n\nclass Styled(object):\n \"\"\"\n Styled object, like Table, Row, Column, or Cell objects.\n\n A styled object stores user-defined styles: a dictionary of key-value pairs.\n This values are useful to store some HTML-like styles (border-style,\n border-width, border-color, vertical-align, text-align, etc.).\n Of course, we are not tied to the HTML-like styles, you can use your\n own list of styles.\n\n .. note::\n\n The style dictionary is always copied: in other words, key-value pairs\n are copied but a shallow copy is done for the values (in general, it\n is not a problem if you use non-mutable values like :class:`str`).\n\n A styled object stores a nature: a way to distinguish the body cells,\n from the header and the footer. The default value is ``None``, but you can\n use \"body\", \"header\", \"footer\" or whatever is suitable for your needs.\n This kind of information is in general not stored in the styles,\n even if it is similar.\n\n Tables can also have a *nature*, similar to HTML ``@class`` attribute,\n you can use it do identify the styles to apply to your table.\n\n .. note::\n\n In a :class:`~benker.grid.Grid`, the :ref:`merging <benker__grid__merging>`\n of two natures is done by keeping the first nature and\n dropping the second one. 
In other words, the resulting nature is\n the group of the most top-left nature of the merged cells.\n\n \"\"\"\n __slots__ = ('_styles', 'nature')\n\n def __init__(self, styles, nature):\n \"\"\"\n Construct a styled object from a dictionary of styles.\n\n :type styles: typing.Dict[str, str]\n :param styles:\n Dictionary of key-value pairs, where *keys* are the style names.\n\n :type nature: str\n :ivar nature:\n Cell *nature* used to distinguish the body cells, from the header and the footer.\n\n Table *nature* used to store a value similar to HTML ``@class`` attribute.\n \"\"\"\n #: Dictionary of key-value pairs, where *keys* are the style names.\n self.styles = styles\n\n #: Cell *nature* used to distinguish the body cells, from the header and the footer.\n self.nature = nature\n\n def __str__(self):\n return str(self._styles)\n\n def __repr__(self):\n cls = self.__class__.__name__\n items = pprint.pformat(self._styles)\n nature = self.nature\n return \"<{cls}({items}, {nature!r})>\".format(cls=cls, items=items, nature=nature)\n\n @property\n def styles(self):\n \"\"\" Dictionary of styles: key-value pairs. \"\"\"\n return self._styles\n\n @styles.setter\n def styles(self, styles):\n \"\"\" Setup the dictionary of styles (shallow copy of the items). \"\"\"\n # each cell owns it's own copy of the styles\n self._styles = {} if styles is None else styles.copy()\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
def solution(a, b):
answer = 0;
for i in range(0,len(a)):
answer+=a[i]*b[i];
print(answer);
return answer
solution([1,2,3,4],[-3,-1,0,2]);
|
normal
|
{
"blob_id": "5fd34c698c2060d5399ba43f6746527961aa574b",
"index": 9239,
"step-1": "<mask token>\n",
"step-2": "def solution(a, b):\n answer = 0\n for i in range(0, len(a)):\n answer += a[i] * b[i]\n print(answer)\n return answer\n\n\n<mask token>\n",
"step-3": "def solution(a, b):\n answer = 0\n for i in range(0, len(a)):\n answer += a[i] * b[i]\n print(answer)\n return answer\n\n\nsolution([1, 2, 3, 4], [-3, -1, 0, 2])\n",
"step-4": "def solution(a, b):\n answer = 0;\n\n for i in range(0,len(a)):\n answer+=a[i]*b[i];\n\n print(answer); \n return answer\n\nsolution([1,2,3,4],[-3,-1,0,2]);",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#day11
n = int(input("Enter a number: "))
c = 0
a,b = 0, 1
list = [a, b]
for i in range(2,n+1):
c = a+b
list.append(c)
a,b = b, c
print(n,"th fibonacci number is ",list[n])
|
normal
|
{
"blob_id": "255cdbce1f9f7709165b1a29362026ad92ba4712",
"index": 2303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, n + 1):\n c = a + b\n list.append(c)\n a, b = b, c\nprint(n, 'th fibonacci number is ', list[n])\n",
"step-3": "n = int(input('Enter a number: '))\nc = 0\na, b = 0, 1\nlist = [a, b]\nfor i in range(2, n + 1):\n c = a + b\n list.append(c)\n a, b = b, c\nprint(n, 'th fibonacci number is ', list[n])\n",
"step-4": "#day11\nn = int(input(\"Enter a number: \"))\nc = 0\na,b = 0, 1\nlist = [a, b]\nfor i in range(2,n+1):\n c = a+b\n list.append(c)\n a,b = b, c\nprint(n,\"th fibonacci number is \",list[n])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright 2021 Yegor Bitensky
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DiceEmptyInialItemsError(Exception):
def __init__(self):
super().__init__(
"To dice creation "
"whether \"faces_count\" or \"faces_items\" "
"argsuments need to be passed."
)
class DiceWrongFacesCountTypeError(Exception):
def __init__(self):
super().__init__("Dice \"faces_count\" argsument type need to be \"int\".")
class DiceWrongFacesCountError(Exception):
def __init__(self, min_count):
super().__init__(f"Dice \"faces_count\" argsument need to be greater or equal to {min_count}.")
class DiceWrongFacesItemsTypeError(Exception):
def __init__(self):
super().__init__("Dice \"faces_items\" argsument need to be iterable.")
class DiceWrongFacesItemsCountError(Exception):
def __init__(self, min_count):
super().__init__(f"Dice \"faces_items\" count need to be greater or equal to {min_count}.")
class DiceBoxWrongItemAdditionError(Exception):
def __init__(self):
super().__init__("Dice instance expected.")
|
normal
|
{
"blob_id": "5750fd4b59f75ea63b4214ee66b23602ed4d314d",
"index": 8909,
"step-1": "<mask token>\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-2": "<mask token>\n\n\nclass DiceWrongFacesCountError(Exception):\n <mask token>\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-3": "<mask token>\n\n\nclass DiceWrongFacesCountTypeError(Exception):\n <mask token>\n\n\nclass DiceWrongFacesCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_count\" argsument need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-4": "class DiceEmptyInialItemsError(Exception):\n\n def __init__(self):\n super().__init__(\n 'To dice creation whether \"faces_count\" or \"faces_items\" argsuments need to be passed.'\n )\n\n\nclass DiceWrongFacesCountTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_count\" argsument type need to be \"int\".')\n\n\nclass DiceWrongFacesCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_count\" argsument need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-5": "# Copyright 2021 Yegor Bitensky\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass DiceEmptyInialItemsError(Exception):\n def __init__(self):\n super().__init__(\n \"To dice creation \"\n \"whether \\\"faces_count\\\" or \\\"faces_items\\\" \"\n \"argsuments need to be passed.\"\n )\n\n\nclass DiceWrongFacesCountTypeError(Exception):\n def __init__(self):\n super().__init__(\"Dice \\\"faces_count\\\" argsument type need to be \\\"int\\\".\")\n\n\nclass DiceWrongFacesCountError(Exception):\n def __init__(self, min_count):\n super().__init__(f\"Dice \\\"faces_count\\\" argsument need to be greater or equal to {min_count}.\")\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n def __init__(self):\n super().__init__(\"Dice \\\"faces_items\\\" argsument need to be iterable.\")\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n def __init__(self, min_count):\n super().__init__(f\"Dice \\\"faces_items\\\" count need to be greater or equal to {min_count}.\")\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n def __init__(self):\n super().__init__(\"Dice instance expected.\")\n",
"step-ids": [
6,
7,
9,
12,
13
]
}
|
[
6,
7,
9,
12,
13
] |
#!/usr/bin/env python3
"""Load saved RL-DMP weights and visualise sampled rollouts.

Plots five noisy rollouts (blue) against the initial DMP prediction (red),
one figure per orientation dimension (raw / yaw / pitch).
"""
import numpy as np
from DMP.PIDMP import RLDMPs
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection; kept for side effect

np.random.seed(50)

# Start / goal configuration of the demonstrated motion (radians).
dmp_y0 = np.array([-1.52017496, 0.04908739, 1.41433029])
dmp_goal = np.array([-1.50848603, 0.0591503 , 1.44347592])

# The weight-file name encodes training metadata, '_'-separated:
# w_<ep>_<numofball>_<arm>_<n_dmps>_<n_bfs>_<decay>_<dt>_<suffix>
load_file_name = "w_0_2_right_3_100_1000.0_0.01_4"
#load_file_name = raw_input('file name: ')
load_file_name_list = load_file_name.split('_')
### learning ep
ep = int(load_file_name_list[1])
### pouring number of ball to the other tube
numofball = int(load_file_name_list[2])
### which arm do the pouring motion
pour_arm = load_file_name_list[3]
n_dmps = int(load_file_name_list[4])
n_bfs = int(load_file_name_list[5])
decay = float(load_file_name_list[6])
dt = float(load_file_name_list[7])

### initial DMP
rl = RLDMPs(n_dmps=n_dmps, n_bfs=n_bfs, decay=decay, y0=dmp_y0, goal=dmp_goal,
            ay=np.ones(n_dmps) * 10.0, dt=dt)

rl.load_weight(load_file_name)

traj_init = rl.predict().y
track = rl.rollout()

print(rl.w)

# Normalised time axis shared by all plots.
x = np.linspace(0, 1, len(traj_init[0][:, 0]))

# FIX: the three per-dimension plotting sections were copy-pasted verbatim;
# a single loop removes the triplication while producing identical figures.
for dim, ylabel in enumerate(("raw (rad)", "yaw (rad)", "pitch (rad)")):
    plt.scatter(x, track.y[0][:, dim], c='b', label="random")
    for k in range(1, 5):  # rollouts 1..4, same colour, no extra legend entry
        plt.scatter(x, track.y[k][:, dim], c='b')
    plt.scatter(x, traj_init[0][:, dim], c='r', label="initial")
    plt.xlabel("time(s)")
    plt.ylabel(ylabel)
    plt.legend(loc=4)
    plt.show()
|
normal
|
{
"blob_id": "5e6bbb10ec82e566c749dd4d794eabd2e8f7a648",
"index": 4488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(50)\n<mask token>\nrl.load_weight(load_file_name)\n<mask token>\nprint(rl.w)\n<mask token>\nplt.scatter(x, track.y[0][:, 0], c='b', label='random')\nplt.scatter(x, track.y[1][:, 0], c='b')\nplt.scatter(x, track.y[2][:, 0], c='b')\nplt.scatter(x, track.y[3][:, 0], c='b')\nplt.scatter(x, track.y[4][:, 0], c='b')\nplt.scatter(x, traj_init[0][:, 0], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('raw (rad)')\nplt.legend(loc=4)\nplt.show()\nplt.scatter(x, track.y[0][:, 1], c='b', label='random')\nplt.scatter(x, track.y[1][:, 1], c='b')\nplt.scatter(x, track.y[2][:, 1], c='b')\nplt.scatter(x, track.y[3][:, 1], c='b')\nplt.scatter(x, track.y[4][:, 1], c='b')\nplt.scatter(x, traj_init[0][:, 1], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('yaw (rad)')\nplt.legend(loc=4)\nplt.show()\nplt.scatter(x, track.y[0][:, 2], c='b', label='random')\nplt.scatter(x, track.y[1][:, 2], c='b')\nplt.scatter(x, track.y[2][:, 2], c='b')\nplt.scatter(x, track.y[3][:, 2], c='b')\nplt.scatter(x, track.y[4][:, 2], c='b')\nplt.scatter(x, traj_init[0][:, 2], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('pitch (rad)')\nplt.legend(loc=4)\nplt.show()\n",
"step-3": "<mask token>\nnp.random.seed(50)\ndmp_y0 = np.array([-1.52017496, 0.04908739, 1.41433029])\ndmp_goal = np.array([-1.50848603, 0.0591503, 1.44347592])\nload_file_name = 'w_0_2_right_3_100_1000.0_0.01_4'\nload_file_name_list = load_file_name.split('_')\nep = int(load_file_name_list[1])\nnumofball = int(load_file_name_list[2])\npour_arm = load_file_name_list[3]\nn_dmps = int(load_file_name_list[4])\nn_bfs = int(load_file_name_list[5])\ndecay = float(load_file_name_list[6])\ndt = float(load_file_name_list[7])\nrl = RLDMPs(n_dmps=n_dmps, n_bfs=n_bfs, decay=decay, y0=dmp_y0, goal=\n dmp_goal, ay=np.ones(n_dmps) * 10.0, dt=dt)\nrl.load_weight(load_file_name)\ntraj_init = rl.predict().y\ntrack = rl.rollout()\nprint(rl.w)\nx = np.linspace(0, 1, len(traj_init[0][:, 0]))\nplt.scatter(x, track.y[0][:, 0], c='b', label='random')\nplt.scatter(x, track.y[1][:, 0], c='b')\nplt.scatter(x, track.y[2][:, 0], c='b')\nplt.scatter(x, track.y[3][:, 0], c='b')\nplt.scatter(x, track.y[4][:, 0], c='b')\nplt.scatter(x, traj_init[0][:, 0], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('raw (rad)')\nplt.legend(loc=4)\nplt.show()\nplt.scatter(x, track.y[0][:, 1], c='b', label='random')\nplt.scatter(x, track.y[1][:, 1], c='b')\nplt.scatter(x, track.y[2][:, 1], c='b')\nplt.scatter(x, track.y[3][:, 1], c='b')\nplt.scatter(x, track.y[4][:, 1], c='b')\nplt.scatter(x, traj_init[0][:, 1], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('yaw (rad)')\nplt.legend(loc=4)\nplt.show()\nplt.scatter(x, track.y[0][:, 2], c='b', label='random')\nplt.scatter(x, track.y[1][:, 2], c='b')\nplt.scatter(x, track.y[2][:, 2], c='b')\nplt.scatter(x, track.y[3][:, 2], c='b')\nplt.scatter(x, track.y[4][:, 2], c='b')\nplt.scatter(x, traj_init[0][:, 2], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('pitch (rad)')\nplt.legend(loc=4)\nplt.show()\n",
"step-4": "import numpy as np\nfrom DMP.PIDMP import RLDMPs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nnp.random.seed(50)\ndmp_y0 = np.array([-1.52017496, 0.04908739, 1.41433029])\ndmp_goal = np.array([-1.50848603, 0.0591503, 1.44347592])\nload_file_name = 'w_0_2_right_3_100_1000.0_0.01_4'\nload_file_name_list = load_file_name.split('_')\nep = int(load_file_name_list[1])\nnumofball = int(load_file_name_list[2])\npour_arm = load_file_name_list[3]\nn_dmps = int(load_file_name_list[4])\nn_bfs = int(load_file_name_list[5])\ndecay = float(load_file_name_list[6])\ndt = float(load_file_name_list[7])\nrl = RLDMPs(n_dmps=n_dmps, n_bfs=n_bfs, decay=decay, y0=dmp_y0, goal=\n dmp_goal, ay=np.ones(n_dmps) * 10.0, dt=dt)\nrl.load_weight(load_file_name)\ntraj_init = rl.predict().y\ntrack = rl.rollout()\nprint(rl.w)\nx = np.linspace(0, 1, len(traj_init[0][:, 0]))\nplt.scatter(x, track.y[0][:, 0], c='b', label='random')\nplt.scatter(x, track.y[1][:, 0], c='b')\nplt.scatter(x, track.y[2][:, 0], c='b')\nplt.scatter(x, track.y[3][:, 0], c='b')\nplt.scatter(x, track.y[4][:, 0], c='b')\nplt.scatter(x, traj_init[0][:, 0], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('raw (rad)')\nplt.legend(loc=4)\nplt.show()\nplt.scatter(x, track.y[0][:, 1], c='b', label='random')\nplt.scatter(x, track.y[1][:, 1], c='b')\nplt.scatter(x, track.y[2][:, 1], c='b')\nplt.scatter(x, track.y[3][:, 1], c='b')\nplt.scatter(x, track.y[4][:, 1], c='b')\nplt.scatter(x, traj_init[0][:, 1], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('yaw (rad)')\nplt.legend(loc=4)\nplt.show()\nplt.scatter(x, track.y[0][:, 2], c='b', label='random')\nplt.scatter(x, track.y[1][:, 2], c='b')\nplt.scatter(x, track.y[2][:, 2], c='b')\nplt.scatter(x, track.y[3][:, 2], c='b')\nplt.scatter(x, track.y[4][:, 2], c='b')\nplt.scatter(x, traj_init[0][:, 2], c='r', label='initial')\nplt.xlabel('time(s)')\nplt.ylabel('pitch (rad)')\nplt.legend(loc=4)\nplt.show()\n",
"step-5": "#!/usr/bin/env python3\nimport numpy as np\nfrom DMP.PIDMP import RLDMPs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nnp.random.seed(50)\ndmp_y0 = np.array([-1.52017496, 0.04908739, 1.41433029])\ndmp_goal = np.array([-1.50848603, 0.0591503 , 1.44347592])\n \n\nload_file_name = \"w_0_2_right_3_100_1000.0_0.01_4\"\n#load_file_name = raw_input('file name: ')\nload_file_name_list = load_file_name.split('_')\n### learning ep\nep = int(load_file_name_list[1])\n### pouring number of ball to the other tube\nnumofball = int(load_file_name_list[2])\n### which arm do the pouring motion\npour_arm = load_file_name_list[3]\nn_dmps = int(load_file_name_list[4])\nn_bfs = int(load_file_name_list[5])\ndecay = float(load_file_name_list[6])\ndt = float(load_file_name_list[7])\n\n### initial DMP\nrl = RLDMPs(n_dmps = n_dmps , n_bfs = n_bfs , decay = decay, y0 = dmp_y0 , goal = dmp_goal,ay=np.ones(n_dmps)*10.0,dt = dt)\n\nrl.load_weight(load_file_name)\n\ntraj_init = rl.predict().y\ntrack = rl.rollout()\n\nprint(rl.w)\n\nx = np.linspace(0,1,len(traj_init[0][:,0]))\n\nplt.scatter(x,track.y[0][:,0],c='b',label=\"random\")\nplt.scatter(x,track.y[1][:,0],c='b')\nplt.scatter(x,track.y[2][:,0],c='b')\nplt.scatter(x,track.y[3][:,0],c='b')\nplt.scatter(x,track.y[4][:,0],c='b')\nplt.scatter(x,traj_init[0][:,0],c='r',label=\"initial\")\nplt.xlabel(\"time(s)\")\nplt.ylabel(\"raw (rad)\")\nplt.legend(loc = 4)\nplt.show()\n\n\nplt.scatter(x,track.y[0][:,1],c='b',label=\"random\")\nplt.scatter(x,track.y[1][:,1],c='b')\nplt.scatter(x,track.y[2][:,1],c='b')\nplt.scatter(x,track.y[3][:,1],c='b')\nplt.scatter(x,track.y[4][:,1],c='b')\nplt.scatter(x,traj_init[0][:,1],c='r',label=\"initial\")\nplt.xlabel(\"time(s)\")\nplt.ylabel(\"yaw (rad)\")\nplt.legend(loc = 
4)\nplt.show()\n\n\nplt.scatter(x,track.y[0][:,2],c='b',label=\"random\")\nplt.scatter(x,track.y[1][:,2],c='b')\nplt.scatter(x,track.y[2][:,2],c='b')\nplt.scatter(x,track.y[3][:,2],c='b')\nplt.scatter(x,track.y[4][:,2],c='b')\nplt.scatter(x,traj_init[0][:,2],c='r',label=\"initial\")\n\nplt.xlabel(\"time(s)\")\nplt.ylabel(\"pitch (rad)\")\nplt.legend(loc = 4)\nplt.show()\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
from Spreadsheet.HTML import Table
class TestColGroup(unittest.TestCase):
    """Exercises 'colgroup' / 'col' handling of Spreadsheet.HTML.Table.

    Both tests are currently disabled. They previously began with a bare
    ``return``, which made them report "pass" while executing nothing; they
    now call :meth:`unittest.TestCase.skipTest` so the runner reports them
    honestly as skipped.
    """

    def test_colgroup(self):
        """colgroup attributes, overriding, no-op scalars, multiple groups."""
        # FIX: bare 'return' silently passed; make the disabling explicit.
        self.skipTest("disabled pending Spreadsheet.HTML colgroup support")

        data = [
            ['a', 'b', 'c'],
            [1, 2, 3],
            [4, 5, 6],
        ]

        gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100}, 'attr_sort': 1})

        self.assertEqual(
            '<table><colgroup span="3" width="100" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate(),
            "colgroup present from generate()"
        )

        self.assertEqual(
            '<table><colgroup span="3" width="100" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>',
            gen.generate({'tgroups': 2}),
            "colgroup present from generate() with tgroups"
        )

        self.assertEqual(
            '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'colgroup': None}),
            "colgroup can be overriden"
        )

        self.assertEqual(
            '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'colgroup': 1}),
            "colgroup yields no-op if scalar"
        )

        self.assertEqual(
            '<table><colgroup color="red" span="1" /><colgroup color="blue" span="2" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {'span': 2, 'color': 'blue'}]}),
            "can specify multiple colgroups"
        )

    def test_col(self):
        """col wrapping inside colgroup, and col/colgroup combinations."""
        # FIX: bare 'return' silently passed; make the disabling explicit.
        self.skipTest("disabled pending Spreadsheet.HTML col support")

        data = [
            ['a', 'b', 'c'],
            [1, 2, 3],
            [4, 5, 6],
        ]

        gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100}, 'attr_sort': 1})

        self.assertEqual(
            '<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'col': {}}),
            "colgroup wraps col"
        )

        self.assertEqual(
            '<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'col': [{}, {}, {}]}),
            "colgroup wraps multiple cols"
        )

        self.assertEqual(
            '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'colgroup': None, 'col': {}}),
            "colgroup can be overriden when col is present too"
        )

        gen = Table({'data': data, 'col': [{}, {}, {}]})

        self.assertEqual(
            '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'colgroup': {}}),
            "multiple cols against single colgroup"
        )

        self.assertEqual(
            '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),
            "no cols against multiple colgroups"
        )

        self.assertEqual(
            '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
            gen.generate({'colgroup': [{}, {}, {}]}),
            "multiple cols against multiple colgroups"
        )
# Standard unittest entry point: running this file directly executes the suite.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "24f87bd6aab0ff65cf2153e27df31122818ad0ac",
"index": 766,
"step-1": "<mask token>\n\n\nclass TestColGroup(unittest.TestCase):\n <mask token>\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col 
/></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate(), 'colgroup present from generate()')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'\n , gen.generate({'tgroups': 2}),\n 'colgroup present from generate() with tgroups')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None}), 'colgroup can be overriden')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {\n 'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col 
/></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate(), 'colgroup present from generate()')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'\n , gen.generate({'tgroups': 2}),\n 'colgroup present from generate() with tgroups')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None}), 'colgroup can be overriden')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {\n 'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col 
/></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom Spreadsheet.HTML import Table\n\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate(), 'colgroup present from generate()')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'\n , gen.generate({'tgroups': 2}),\n 'colgroup present from generate() with tgroups')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None}), 'colgroup can be overriden')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {\n 'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col 
/></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom Spreadsheet.HTML import Table\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n\n data = [\n ['a','b','c'],\n [1,2,3],\n [4,5,6],\n ]\n\n gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } )\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate(),\n \"colgroup present from generate()\"\n )\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>',\n gen.generate( { 'tgroups': 2 } ),\n \"colgroup present from generate() with tgroups\"\n )\n\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': None } ),\n \"colgroup can be overriden\"\n )\n\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': 1 } ),\n \"colgroup yields no-op if scalar\"\n )\n\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': [ { 'span': 1, 'color': 'red' }, { 'span': 2, 'color': 'blue' } ] } ),\n \"can specify multiple colgroups\"\n )\n\n\n def test_col(self):\n return\n\n data = [\n ['a','b','c'],\n [1,2,3],\n [4,5,6],\n ]\n\n gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } );\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col 
/></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'col': {} } ),\n \"colgroup wraps col\"\n )\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'col': [{},{},{}] } ),\n \"colgroup wraps multiple cols\"\n )\n\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': None, 'col': {} } ),\n \"colgroup can be overriden when col is present too\"\n )\n\n\n gen = Table( { 'data': data, 'col': [{},{},{}] } );\n\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': {} } ),\n \"multiple cols against single colgroup\"\n )\n\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'col': None, 'colgroup': [{},{},{}] } ),\n \"no cols against multiple colgroups\"\n )\n\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': [{},{},{}] } ),\n \"multiple cols against multiple colgroups\"\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import input_data
import tensorflow as tf
from infogan import InfoGAN
if __name__ == '__main__':
    # Load MNIST with one-hot labels and note the training-set size.
    mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data', one_hot=True)
    num_sample = mnist_data.train.num_examples

    dataset = 'mnist'
    if dataset == 'mnist':
        # 28x28 flattened images.
        input_dim = 784

    # Latent layout: noise vector + categorical code + continuous codes.
    z_dim, c_discrete_dim, c_continuous_dim = 16, 10, 2

    # Training schedule.
    num_epoch, batch_size = 1000000, 32

    with tf.Session() as sess:
        gan = InfoGAN(
            sess,
            num_epoch=num_epoch,
            batch_size=batch_size,
            dataset=dataset,
            input_dim=input_dim,
            z_dim=z_dim,
            c_discrete_dim=c_discrete_dim,
            c_continuous_dim=c_continuous_dim,
        )
        # Assemble the generator / discriminator / Q-network graph, then train.
        gan.build_net()
        gan.train(mnist_data.train, num_sample)
|
normal
|
{
"blob_id": "02a28b61ad9d664c89829df019f4887c2c869f91",
"index": 6046,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data',\n one_hot=True)\n num_sample = mnist_data.train.num_examples\n dataset = 'mnist'\n if dataset == 'mnist':\n input_dim = 784\n z_dim = 16\n c_discrete_dim = 10\n c_continuous_dim = 2\n num_epoch = 1000000\n batch_size = 32\n with tf.Session() as sess:\n gan = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,\n dataset=dataset, input_dim=input_dim, z_dim=z_dim,\n c_discrete_dim=c_discrete_dim, c_continuous_dim=c_continuous_dim)\n gan.build_net()\n gan.train(mnist_data.train, num_sample)\n",
"step-3": "import input_data\nimport tensorflow as tf\nfrom infogan import InfoGAN\nif __name__ == '__main__':\n mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data',\n one_hot=True)\n num_sample = mnist_data.train.num_examples\n dataset = 'mnist'\n if dataset == 'mnist':\n input_dim = 784\n z_dim = 16\n c_discrete_dim = 10\n c_continuous_dim = 2\n num_epoch = 1000000\n batch_size = 32\n with tf.Session() as sess:\n gan = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,\n dataset=dataset, input_dim=input_dim, z_dim=z_dim,\n c_discrete_dim=c_discrete_dim, c_continuous_dim=c_continuous_dim)\n gan.build_net()\n gan.train(mnist_data.train, num_sample)\n",
"step-4": "import input_data\nimport tensorflow as tf\nfrom infogan import InfoGAN\n\nif __name__ == '__main__':\n # get input data\n mnist_data = input_data.load_mnist_dataset('../../dataset/mnist_data', one_hot=True)\n num_sample = mnist_data.train.num_examples\n dataset = 'mnist'\n if dataset == 'mnist':\n input_dim = 784\n\n # define latent dimension\n z_dim = 16\n c_discrete_dim = 10\n c_continuous_dim = 2\n\n num_epoch = 1000000\n batch_size = 32\n\n # Launch the session\n with tf.Session() as sess:\n gan = InfoGAN(sess, num_epoch=num_epoch, batch_size=batch_size,\n dataset=dataset, input_dim=input_dim, z_dim=z_dim, c_discrete_dim=c_discrete_dim,\n c_continuous_dim=c_continuous_dim)\n\n # build generative adversarial network\n gan.build_net()\n\n # train the model\n gan.train(mnist_data.train, num_sample)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import optparse
from camera import apogee_U2000
if __name__ == "__main__":
parser = optparse.OptionParser()
group1 = optparse.OptionGroup(parser, "General")
group1.add_option('--s', action='store', default=1, dest='mode', help='set cooler on/off')
args = parser.parse_args()
options, args = parser.parse_args()
try:
mode = bool(options.mode)
except TypeError:
print "Set must be boolean"
c = apogee_U2000(camera_idx=0)
c.setCooler(mode)
c.disconnect()
|
normal
|
{
"blob_id": "60c849d213f6266aeb0660fde06254dfa635f10f",
"index": 3383,
"step-1": "import optparse\n\nfrom camera import apogee_U2000\t\t\t\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser()\n group1 = optparse.OptionGroup(parser, \"General\") \n group1.add_option('--s', action='store', default=1, dest='mode', help='set cooler on/off')\n\n args = parser.parse_args()\n options, args = parser.parse_args()\n\n try:\n mode = bool(options.mode)\n except TypeError:\n print \"Set must be boolean\"\n\n c = apogee_U2000(camera_idx=0)\n c.setCooler(mode)\n c.disconnect()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Inspiration: [Fake Album Covers](https://fakealbumcovers.com/)
from IPython.display import Image as IPythonImage
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import requests
from xml.etree import ElementTree as ET
def display_cover(top, bottom):
    """Render a fake album cover.

    Downloads a random 500x500 photo and overlays *top* (band name) and
    *bottom* (album name), each drawn with a 1-pixel black outline so the
    white text stays legible on any background.  Returns the PIL image.
    """
    name = 'album_art_raw.png'
    album_art_raw = requests.get('https://picsum.photos/500/500/?random')
    with open(name, 'wb') as album_art_raw_file:
        album_art_raw_file.write(album_art_raw.content)

    img = Image.open("album_art_raw.png")
    draw = ImageDraw.Draw(img)

    band_name_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 25)  # 25pt
    album_name_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 20)  # 20pt

    outline_color = "black"
    placements = [
        (top, (50, 50), band_name_font),      # band name, top-left
        (bottom, (50, 400), album_name_font),  # album name, bottom-left
    ]

    # Fake an outline: stamp each text in black at the four diagonal
    # one-pixel offsets first ...
    for text, (x, y), font in placements:
        for dx in (-1, 1):
            for dy in (-1, 1):
                draw.text((x + dx, y + dy), text, font=font, fill=outline_color)

    # ... then draw the white text itself on top.
    for text, (x, y), font in placements:
        draw.text((x, y), text, (255, 255, 255), font=font)

    return img
def _random_wikipedia_title():
    """Fetch a random Wikipedia page and return its title text
    (with the ' - Wikipedia' suffix stripped)."""
    wikipedia = 'https://en.wikipedia.org/wiki/Special:Random'
    page = requests.get(wikipedia).text.strip()
    title = ET.fromstring(page).find('head/title')
    return title.text.replace(' - Wikipedia', '')


# BUG FIX: the second fetch originally ended in a stray trailing "p"
# (`.strip()p`), a syntax error; the duplicated fetch logic now lives in
# one helper.
band_title = _random_wikipedia_title()
album_title = _random_wikipedia_title()
print(album_title)

print("Your band: ", band_title)
print("Your album: ", album_title)

img = display_cover(band_title, album_title)

img.save('sample-out.png')

IPythonImage(filename='sample-out.png')
|
normal
|
{
"blob_id": "07215403750be53994ae36727b6f790202b88697",
"index": 253,
"step-1": "# Inspiration: [Fake Album Covers](https://fakealbumcovers.com/)\nfrom IPython.display import Image as IPythonImage\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nimport requests\nfrom xml.etree import ElementTree as ET\n\ndef display_cover(top,bottom ):\n \n name='album_art_raw.png'\n album_art_raw = requests.get('https://picsum.photos/500/500/?random')\n \n with open(name,'wb') as album_art_raw_file:\n album_art_raw_file.write(album_art_raw.content)\n\n img = Image.open(\"album_art_raw.png\")\n draw = ImageDraw.Draw(img)\n\n band_name_font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\", 25) #25pt font\n album_name_font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\", 20) # 20pt font\n\n band_x, band_y = 50, 50\n album_x, album_y = 50, 400\n\n outline_color =\"black\"\n\n draw.text((band_x-1, band_y-1), top, font=band_name_font, fill=outline_color)\n draw.text((band_x+1, band_y-1), top, font=band_name_font, fill=outline_color)\n draw.text((band_x-1, band_y+1), top, font=band_name_font, fill=outline_color)\n draw.text((band_x+1, band_y+1), top, font=band_name_font, fill=outline_color)\n\n draw.text((album_x-1, album_y-1), bottom , font=album_name_font, fill=outline_color)\n draw.text((album_x+1, album_y-1), bottom , font=album_name_font, fill=outline_color)\n draw.text((album_x-1, album_y+1), bottom , font=album_name_font, fill=outline_color)\n draw.text((album_x+1, album_y+1), bottom , font=album_name_font, fill=outline_color)\n\n draw.text((band_x,band_y),top,(255,255,255),font=band_name_font)\n draw.text((album_x, album_y),bottom,(255,255,255),font=album_name_font)\n\n return img\n\n\nwikipedia='https://en.wikipedia.org/wiki/Special:Random'\npage = requests.get(wikipedia).text.strip()\nfile= ET.fromstring(page).find('head/title')\nband_title = file.text.replace(' - Wikipedia','')\n\nwikipedia='https://en.wikipedia.org/wiki/Special:Random'\npage = 
requests.get(wikipedia).text.strip()p\nfile= ET.fromstring(page).find('head/title')\nalbum_title = file.text.replace(' - Wikipedia','')\nprint(album_title)\n\nprint(\"Your band: \", band_title)\nprint(\"Your album: \", album_title)\n\nimg = display_cover(band_title,album_title)\n\n\nimg.save('sample-out.png')\n\nIPythonImage(filename='sample-out.png')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
__author__ = 'virtual'
# Status-code lookup table.  ``None`` and -1 are sentinel entries
# ("no status" / "unknown"); several codes intentionally have empty names.
statuses = {
    None: {'name': 'None'},
    -1: {'name': 'unknown'},
    0: {'name': ''},
    1: {'name': 'Новый'},             # New
    2: {'name': ''},
    3: {'name': 'Активный'},          # Active
    4: {'name': 'Приостановленный'},  # Suspended
    5: {'name': 'Заблокированный'},   # Blocked
    6: {'name': 'Удаленный'},         # Deleted
    7: {'name': 'Закрытый'},          # Closed
    8: {'name': ''},
}


def get_status_name(status):
    """Return the display string '[<code>]<name>' for *status*.

    BUG FIX: the original used '%d', which raised TypeError for the valid
    ``None`` key; '%s' renders integers identically and accepts None.
    Unknown codes still raise KeyError, as before.
    """
    return '[%s]%s' % (status, statuses[status]['name'])
|
normal
|
{
"blob_id": "a847fc32af2602db3b5545c15186c0209eb8ae8d",
"index": 4008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'])\n",
"step-3": "__author__ = 'virtual'\nstatuses = {None: {'name': 'None'}, (-1): {'name': 'unknown'}, (0): {'name':\n ''}, (1): {'name': 'Новый'}, (2): {'name': ''}, (3): {'name':\n 'Активный'}, (4): {'name': 'Приостановленный'}, (5): {'name':\n 'Заблокированный'}, (6): {'name': 'Удаленный'}, (7): {'name':\n 'Закрытый'}, (8): {'name': ''}}\n\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'])\n",
"step-4": "# -*- coding: utf-8 -*-\n\n__author__ = 'virtual'\n\n\nstatuses = {\n None: {'name': 'None', },\n -1: { 'name': 'unknown', },\n 0: { 'name': '',},\n 1: { 'name': 'Новый',},\n 2: { 'name': '',},\n 3: { 'name': 'Активный', },\n 4: { 'name': 'Приостановленный',},\n 5: { 'name': 'Заблокированный', },\n 6: { 'name': 'Удаленный', },\n 7: { 'name': 'Закрытый', },\n 8: { 'name': '', },\n}\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'], )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
import platform
from numpy import ctypeslib,empty,array,exp,ascontiguousarray,zeros,asfortranarray
from ctypes import c_float,c_double,c_int
from time import time
def resize(img, scale):
    """Downsample *img* to *scale* using the external C library libresize.so.

    The 2-D resize is built from two transposing 1-D resizes (rows, then
    columns).  *img* must be an H x W x C array of C doubles and *scale*
    must be <= 1.0; RuntimeError is raised otherwise, or when the shared
    library cannot be loaded.
    """
    sdims = img.shape
    datatype = c_double
    if img.dtype != datatype:
        # print() form works on both Python 2 and 3 (the original
        # print-statement syntax was Python-2-only).
        print("Error the image must be of doubles!")
        raise RuntimeError

    if scale > 1.0:
        print("Invalid scaling factor!")
        raise RuntimeError

    img = asfortranarray(img, c_double)  # make array contiguous (Fortran order)

    try:
        mresize = ctypeslib.load_library("libresize.so", ".")
    except OSError:
        # Narrowed from a bare except: load_library raises OSError on failure.
        print("Unable to load resize library")
        raise RuntimeError

    # use two times the 1d resize to get a 2d resize
    fresize = mresize.resize1dtran
    fresize.restype = None
    fresize.argtypes = [ctypeslib.ndpointer(dtype=datatype, ndim=3), c_int,
                        ctypeslib.ndpointer(dtype=datatype, ndim=3), c_int, c_int, c_int]
    ddims = [int(round(sdims[0] * scale)), int(round(sdims[1] * scale)), sdims[2]]
    mxdst = zeros(ddims, dtype=datatype)
    tmp = zeros((ddims[0], sdims[1], sdims[2]), dtype=datatype)
    # First pass resizes along rows (transposing into tmp), second pass
    # along columns.  Unused timing locals (t1/t2) and the redundant
    # img1 alias were removed.
    fresize(img, sdims[0], tmp, ddims[0], sdims[1], sdims[2])
    fresize(tmp, sdims[1], mxdst, ddims[1], ddims[0], sdims[2])
    return mxdst.reshape(ddims[2], ddims[1], ddims[0]).T
if __name__ == "__main__":
from numpy.random import random_integers
from time import time
from pylab import imread,figure,imshow
from ctypes import c_float,c_double,c_int
img=imread("test.png").astype(c_double)
imshow(img)
img1=resize(img,0.25)
figure()
imshow(img1)
|
normal
|
{
"blob_id": "816f4cfe98f5e5b23f2c8f9f42c5f3ed8458042f",
"index": 3700,
"step-1": "#!/usr/bin/python \n\nimport platform\nfrom numpy import ctypeslib,empty,array,exp,ascontiguousarray,zeros,asfortranarray\nfrom ctypes import c_float,c_double,c_int\nfrom time import time\n\ndef resize(img,scale):\n \"\"\"\n downsample img to scale \n \"\"\"\n sdims=img.shape\n datatype=c_double\n if img.dtype!=datatype:\n print \"Error the image must be of doubles!\"\n raise RuntimeError\n\n if scale>1.0:\n print \"Invalid scaling factor!\"\n raise RuntimeError \n \n img = asfortranarray(img,c_double) # make array continguous\n \n try:\n mresize = ctypeslib.load_library(\"libresize.so\",\".\") \n except:\n print \"Unable to load resize library\"\n raise RuntimeError\n \n #use two times the 1d resize to get a 2d resize\n fresize = mresize.resize1dtran\n fresize.restype = None\n fresize.argtypes = [ ctypeslib.ndpointer(dtype=datatype, ndim=3), c_int,ctypeslib.ndpointer(dtype=datatype, ndim=3), c_int, c_int , c_int ]\n ddims = [int(round(sdims[0]*scale)),int(round(sdims[1]*scale)),sdims[2]];\n mxdst = zeros((ddims), dtype=datatype)\n tmp = zeros((ddims[0],sdims[1],sdims[2]), dtype=datatype)\n img1=img\n t1=time()\n fresize(img1, sdims[0], tmp, ddims[0], sdims[1], sdims[2]);\n fresize(tmp, sdims[1], mxdst, ddims[1], ddims[0], sdims[2]);\n t2=time()\n return mxdst.reshape(ddims[2],ddims[1],ddims[0]).T\n\n\nif __name__ == \"__main__\":\n from numpy.random import random_integers\n from time import time\n from pylab import imread,figure,imshow\n from ctypes import c_float,c_double,c_int\n \n img=imread(\"test.png\").astype(c_double)\n imshow(img)\n img1=resize(img,0.25)\n figure()\n imshow(img1)\n\n \n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import time
class Solution(object):
    """Enumerate all k-element combinations of 1..n (LeetCode 77 style).

    Two equivalent solvers are provided: a memoized DFS (``memory_search``)
    and a bottom-up dynamic program (``dp``); ``combine`` is the public
    entry point and delegates to the DFS version.
    """

    def __init__(self):
        # Upper bound of the value range; set by memory_search().
        self.n = None
        # Memo table: (bottom, energy) -> list of combinations that start
        # at `bottom` and still need `energy` elements.
        self.memory = dict()

    def dfs(self, bottom, energy):
        """Return every ascending `energy`-element combination drawn from
        bottom..self.n whose first element is `bottom`."""
        # optimize for memory, save search time for duplicate results
        if (bottom,energy) in self.memory:
            return self.memory[(bottom,energy)]
        # ending search: one slot left, so the combination is just [bottom]
        if energy == 1:
            return [[bottom]]
        results = []
        # The next element after `bottom` is v+1, ranging bottom+1..n.
        for v in range(bottom, self.n):
            tail_list = self.dfs(v+1, energy-1)
            for result in tail_list:
                results.append([bottom]+result)
        self.memory[(bottom,energy)] = results
        return results

    def memory_search(self, n, k):
        """Collect the k-combinations of 1..n via the memoized DFS,
        grouped by smallest element (1 .. n-k+1)."""
        self.n = n
        self.memory = dict()
        results = []
        for i in range(1, n+1-k+1):
            combinations = self.dfs(i, k)
            if combinations is not None:
                results = results + combinations
        return results

    def dp(self, n, k):
        """Bottom-up variant: F[row, col] = combinations of size `col`
        from 1..row, built column by column with two rolling dicts."""
        # initialize: F[n,1]
        tmp = []
        pre_k_results = {}
        for i in range(1,n+1):
            tmp.append([i])
            pre_k_results[i] = tmp.copy()
        results = pre_k_results
        # F[n,k] = F[n-1,k] + (item + [n] for item in F[n-1, k-1])
        for col in range(2,k+1):
            cur_k_results = {}
            # Only rows col..n-k+col can contribute to the final answer.
            for row in range(col,n-k+col+1):
                cur_results = []
                # Part1: F[n-1, k]
                # NOTE: pre_n_results is the previous row's list; the
                # row > col guard keeps the first row of each column from
                # reading a stale value.
                if row > col:
                    cur_results = cur_results + pre_n_results
                # Part2: (item + [n] for item in F[n-1, k-1])
                for item in pre_k_results[row-1]:
                    cur_results.append(item+[row])
                pre_n_results = cur_results
                cur_k_results[row] = cur_results
            pre_k_results = cur_k_results
            results = cur_k_results
        return results[n]

    def combine(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: List[List[int]]
        """
        results = self.memory_search(n, k)
        # results = self.dp(n, k)
        return results
def main():
    """Benchmark memory_search against dp across a small grid of (n, k)
    and print the wall-clock time of each, in the original format."""

    def _elapsed(fn, *args):
        # Time a single call; microseconds rounded to two decimals.
        begin = time.time()
        fn(*args)
        return round((time.time() - begin) * 1000 * 1000, 2)

    for n in range(5, 10):
        for k in range(2, n):
            memory_search_time = _elapsed(Solution().memory_search, n, k)
            dp_time = _elapsed(Solution().dp, n, k)
            # format(**locals()) picks up n, k and the two timings above.
            print("n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms".format(**locals()))


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "d52b6dda7111aefb7f9a7b10ad606cda615389d9",
"index": 7123,
"step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n <mask token>\n <mask token>\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def __init__(self):\n self.n = None\n self.memory = dict()\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n\n def memory_search(self, n, k):\n self.n = n\n self.memory = dict()\n results = []\n for i in range(1, n + 1 - k + 1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n <mask token>\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def __init__(self):\n self.n = None\n self.memory = dict()\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n\n def memory_search(self, n, k):\n self.n = n\n self.memory = dict()\n results = []\n for i in range(1, n + 1 - k + 1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n\n def dp(self, n, k):\n tmp = []\n pre_k_results = {}\n for i in range(1, n + 1):\n tmp.append([i])\n pre_k_results[i] = tmp.copy()\n results = pre_k_results\n for col in range(2, k + 1):\n cur_k_results = {}\n for row in range(col, n - k + col + 1):\n cur_results = []\n if row > col:\n cur_results = cur_results + pre_n_results\n for item in pre_k_results[row - 1]:\n cur_results.append(item + [row])\n pre_n_results = cur_results\n cur_k_results[row] = cur_results\n pre_k_results = cur_k_results\n results = cur_k_results\n return results[n]\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\ndef main():\n for n in range(5, 10):\n for k in range(2, n):\n start = time.time()\n ret1 = Solution().memory_search(n, k)\n end = time.time()\n memory_search_time = round((end - start) * 1000 * 1000, 2)\n start = time.time()\n ret2 = Solution().dp(n, k)\n end = time.time()\n dp_time = round((end - start) * 1000 * 1000, 2)\n print(\n 'n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms'\n .format(**locals()))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution(object):\n\n def __init__(self):\n self.n = None\n self.memory = dict()\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n\n def memory_search(self, n, k):\n self.n = n\n self.memory = dict()\n results = []\n for i in range(1, n + 1 - k + 1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n\n def dp(self, n, k):\n tmp = []\n pre_k_results = {}\n for i in range(1, n + 1):\n tmp.append([i])\n pre_k_results[i] = tmp.copy()\n results = pre_k_results\n for col in range(2, k + 1):\n cur_k_results = {}\n for row in range(col, n - k + col + 1):\n cur_results = []\n if row > col:\n cur_results = cur_results + pre_n_results\n for item in pre_k_results[row - 1]:\n cur_results.append(item + [row])\n pre_n_results = cur_results\n cur_k_results[row] = cur_results\n pre_k_results = cur_k_results\n results = cur_k_results\n return results[n]\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\ndef main():\n for n in range(5, 10):\n for k in range(2, n):\n start = time.time()\n ret1 = Solution().memory_search(n, k)\n end = time.time()\n memory_search_time = round((end - start) * 1000 * 1000, 2)\n start = time.time()\n ret2 = Solution().dp(n, k)\n end = time.time()\n dp_time = round((end - start) * 1000 * 1000, 2)\n print(\n 'n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms'\n .format(**locals()))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import time\nclass Solution(object):\n def __init__(self):\n self.n = None\n self.memory = dict()\n \n def dfs(self, bottom, energy):\n # optimize for memory, save search time for duplicate results\n if (bottom,energy) in self.memory:\n return self.memory[(bottom,energy)]\n # ending search\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v+1, energy-1)\n for result in tail_list:\n results.append([bottom]+result)\n self.memory[(bottom,energy)] = results\n return results\n \n def memory_search(self, n, k):\n self.n = n \n self.memory = dict()\n results = []\n for i in range(1, n+1-k+1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n\n def dp(self, n, k):\n \n # initialize: F[n,1]\n tmp = []\n pre_k_results = {}\n for i in range(1,n+1):\n tmp.append([i])\n pre_k_results[i] = tmp.copy()\n \n results = pre_k_results\n # F[n,k] = F[n-1,k] + (item + [n] for item in F[n-1, k-1])\n for col in range(2,k+1):\n cur_k_results = {}\n for row in range(col,n-k+col+1):\n cur_results = []\n # Part1: F[n-1, k]\n if row > col:\n cur_results = cur_results + pre_n_results\n # Part2: (item + [n] for item in F[n-1, k-1])\n for item in pre_k_results[row-1]:\n cur_results.append(item+[row])\n pre_n_results = cur_results\n cur_k_results[row] = cur_results\n pre_k_results = cur_k_results\n results = cur_k_results\n \n return results[n]\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n # results = self.dp(n, k)\n return results\n\ndef main():\n\n # n, k = 4, 1\n # start = time.time()\n # ret2 = Solution().dp(n, k)\n # end = time.time()\n # dp_time = round((end-start)*1000*1000,2) \n # print(ret2, dp_time)\n\n ## time consume test\n for n in range(5,10):\n for k in range(2,n):\n start = time.time()\n ret1 = Solution().memory_search(n, k)\n end = time.time()\n 
memory_search_time = round((end-start)*1000*1000,2)\n start = time.time()\n ret2 = Solution().dp(n, k)\n end = time.time()\n dp_time = round((end-start)*1000*1000,2)\n print(\"n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms\".format(**locals()))\n\n\nif __name__ == '__main__':\n main() ",
"step-ids": [
3,
5,
7,
8,
10
]
}
|
[
3,
5,
7,
8,
10
] |
from __future__ import annotations
import ibis
from ibis import _
def test_format_sql_query_result(con, snapshot):
    """Snapshot the repr of a .sql() expression extended with derived columns."""
    table = con.table("airlines")

    query = """
    SELECT carrier, mean(arrdelay) AS avg_arrdelay
    FROM airlines
    GROUP BY 1
    ORDER BY 2 DESC
    """
    schema = ibis.schema({"carrier": "string", "avg_arrdelay": "double"})

    with con.set_query_schema(query, schema):
        sql_expr = table.sql(query)
        # The generated name is autoincremented; pin it to "foo" so the
        # snapshot stays stable across runs.
        sql_expr = sql_expr.op().copy(name="foo").to_expr()

    expr = sql_expr.mutate(
        island=_.carrier.lower(),
        avg_arrdelay=_.avg_arrdelay.round(1),
    )

    snapshot.assert_match(repr(expr), "repr.txt")
def test_memoize_database_table(con, snapshot):
    """Each source table should be printed exactly once in the repr."""
    left = con.table("test1")
    right = con.table("test2")

    filtered = left[left["f"] > 0]
    joined = right.inner_join(filtered, [filtered["g"] == right["key"]])

    foo = (filtered["f"] - right["value"]).mean().name("foo")
    bar = filtered["f"].sum().name("bar")
    expr = joined.aggregate([foo, bar], by=[filtered["g"], right["key"]])

    result = repr(expr)
    assert result.count("test1") == 1
    assert result.count("test2") == 1

    snapshot.assert_match(result, "repr.txt")
def test_memoize_insert_sort_key(con, snapshot):
    """The source table should be named only once in the printed tree."""
    airlines = con.table("airlines")

    projection = airlines["arrdelay", "dest"]
    expr = projection.group_by("dest").mutate(
        dest_avg=projection.arrdelay.mean(),
        dev=projection.arrdelay - projection.arrdelay.mean(),
    )

    worst = expr[expr.dev.notnull()].order_by(ibis.desc("dev")).limit(10)

    result = repr(worst)
    assert result.count("airlines") == 1

    snapshot.assert_match(result, "repr.txt")
|
normal
|
{
"blob_id": "97ff8dae060475b0efbc8d39e9fc251be8ac091b",
"index": 6264,
"step-1": "<mask token>\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-2": "<mask token>\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\n<mask token>\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-3": "<mask token>\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table('test1')\n table2 = con.table('test2')\n filter_pred = table['f'] > 0\n table3 = table[filter_pred]\n join_pred = table3['g'] == table2['key']\n joined = table2.inner_join(table3, [join_pred])\n met1 = (table3['f'] - table2['value']).mean().name('foo')\n expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[\n table3['g'], table2['key']])\n result = repr(expr)\n assert result.count('test1') == 1\n assert result.count('test2') == 1\n snapshot.assert_match(result, 'repr.txt')\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-4": "from __future__ import annotations\nimport ibis\nfrom ibis import _\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table('test1')\n table2 = con.table('test2')\n filter_pred = table['f'] > 0\n table3 = table[filter_pred]\n join_pred = table3['g'] == table2['key']\n joined = table2.inner_join(table3, [join_pred])\n met1 = (table3['f'] - table2['value']).mean().name('foo')\n expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[\n table3['g'], table2['key']])\n result = repr(expr)\n assert result.count('test1') == 1\n assert result.count('test2') == 1\n snapshot.assert_match(result, 'repr.txt')\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-5": "from __future__ import annotations\n\nimport ibis\nfrom ibis import _\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table(\"airlines\")\n\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({\"carrier\": \"string\", \"avg_arrdelay\": \"double\"})\n\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n # name is autoincremented so we need to set it manually to make the\n # snapshot stable\n expr = expr.op().copy(name=\"foo\").to_expr()\n\n expr = expr.mutate(\n island=_.carrier.lower(),\n avg_arrdelay=_.avg_arrdelay.round(1),\n )\n\n snapshot.assert_match(repr(expr), \"repr.txt\")\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table(\"test1\")\n table2 = con.table(\"test2\")\n\n filter_pred = table[\"f\"] > 0\n table3 = table[filter_pred]\n join_pred = table3[\"g\"] == table2[\"key\"]\n\n joined = table2.inner_join(table3, [join_pred])\n\n met1 = (table3[\"f\"] - table2[\"value\"]).mean().name(\"foo\")\n expr = joined.aggregate(\n [met1, table3[\"f\"].sum().name(\"bar\")], by=[table3[\"g\"], table2[\"key\"]]\n )\n\n result = repr(expr)\n assert result.count(\"test1\") == 1\n assert result.count(\"test2\") == 1\n\n snapshot.assert_match(result, \"repr.txt\")\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table(\"airlines\")\n\n t = table[\"arrdelay\", \"dest\"]\n expr = t.group_by(\"dest\").mutate(\n dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()\n )\n\n worst = expr[expr.dev.notnull()].order_by(ibis.desc(\"dev\")).limit(10)\n\n result = repr(worst)\n assert result.count(\"airlines\") == 1\n\n snapshot.assert_match(result, \"repr.txt\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import math
# Determine which cluster center a pixel belongs to
from utils.util import distance
def attenuation(color, last_mean):
    """Map the distance between *color* and *last_mean* into [0, 1):
    0 for identical values, approaching 1 as distance grows
    (Gaussian-style falloff with scale constant 80)."""
    scaled = distance(color, last_mean) / 80
    return 1 - math.exp(-(scaled ** 2))
def get_Count_By_distance(centers, pixel_use, d):
    """Return the index of the center closest to *pixel_use* (by
    attenuation distance) when that distance is below threshold *d*,
    otherwise -1 (meaning: no existing center matches).

    A threshold set too low spawns many near-duplicate centers; one set
    too high lumps dissimilar pixels into a single cluster.
    """
    threshold = d
    best_distance = 1  # attenuation() is always < 1, so this is a safe max
    best_index = 0
    for index, center in enumerate(centers):
        candidate = attenuation(center, pixel_use)
        if candidate < best_distance:
            best_distance = candidate
            best_index = index
    if best_distance < threshold:
        return best_index
    return -1
|
normal
|
{
"blob_id": "918db455fc50b49ca2b40dd78cecdec4ba08dcb8",
"index": 6013,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_Count_By_distance(centers, pixel_use, d):\n d_min = 1\n d_b = d\n count_use = 0\n for i in range(len(centers)):\n d = attenuation(centers[i], pixel_use)\n if d < d_min:\n d_min = d\n count_use = i\n if d_min < d_b:\n count = count_use\n else:\n count = -1\n return count\n",
"step-3": "<mask token>\n\n\ndef attenuation(color, last_mean):\n return 1 - math.exp((distance(color, last_mean) / 80) ** 2 * -1)\n\n\ndef get_Count_By_distance(centers, pixel_use, d):\n d_min = 1\n d_b = d\n count_use = 0\n for i in range(len(centers)):\n d = attenuation(centers[i], pixel_use)\n if d < d_min:\n d_min = d\n count_use = i\n if d_min < d_b:\n count = count_use\n else:\n count = -1\n return count\n",
"step-4": "import math\nfrom utils.util import distance\n\n\ndef attenuation(color, last_mean):\n return 1 - math.exp((distance(color, last_mean) / 80) ** 2 * -1)\n\n\ndef get_Count_By_distance(centers, pixel_use, d):\n d_min = 1\n d_b = d\n count_use = 0\n for i in range(len(centers)):\n d = attenuation(centers[i], pixel_use)\n if d < d_min:\n d_min = d\n count_use = i\n if d_min < d_b:\n count = count_use\n else:\n count = -1\n return count\n",
"step-5": "import math\n\n# 计算像素点属于哪个中心点\nfrom utils.util import distance\n\n\ndef attenuation(color, last_mean):\n return 1 - math.exp(((distance(color, last_mean) / 80) ** 2) * -1)\ndef get_Count_By_distance(centers, pixel_use,d):\n\n # d_min设置过低会产生多的中心点,许多很相似但是没有归到一类中\n # d_min设置过高产生少的中心点,不相似的归到一类中\n d_min = 1;\n d_b = d;\n count_use = 0;\n for i in range(len(centers)):\n\n d = attenuation(centers[i], pixel_use);\n if d < d_min:\n d_min = d;\n count_use = i;\n\n if d_min < d_b:\n count = count_use;\n else:\n count = -1;\n return count;\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from fixate.reporting.csv import register_csv, unregister_csv
|
normal
|
{
"blob_id": "c70db0fc9d98657e318ecab7eb8af60cc2b19a2c",
"index": 4145,
"step-1": "<mask token>\n",
"step-2": "from fixate.reporting.csv import register_csv, unregister_csv\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from flask import Flask,request,Response
from spamapp.spam import SpamIdentify
from json import dumps,loads
# Module-level WSGI app plus one shared classifier instance.
# NOTE(review): SpamIdentify() presumably loads its model once at import
# time and is reused across requests — confirm it is thread-safe.
app = Flask(__name__)
spam = SpamIdentify()
@app.route("/",methods=['GET'])
def home():
return Response(response=dumps({"msg":"App successfull"}), status=200, mimetype='application/json')
@app.route("/spamapi/",methods=['GET','POST'])
def apicall():
try:
predTxt = loads(request.data)
predTxt = predTxt['input']
response = spam.predict_data(predTxt)
return Response(response=dumps(response), status=200, mimetype='application/json')
except Exception as e:
print("Error",e)
return Response(response=dumps({"result": 6}), status=200, mimetype='application/json')
if __name__ == "__main__":
app.run(
host="192.168.2.240",
port=5000,
debug=True
)
|
normal
|
{
"blob_id": "1552d862d3b9df45eda8c08256e8b4437ab08740",
"index": 2641,
"step-1": "<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return Response(response=dumps({'msg': 'App successfull'}), status=200,\n mimetype='application/json')\n\n\[email protected]('/spamapi/', methods=['GET', 'POST'])\ndef apicall():\n try:\n predTxt = loads(request.data)\n predTxt = predTxt['input']\n response = spam.predict_data(predTxt)\n return Response(response=dumps(response), status=200, mimetype=\n 'application/json')\n except Exception as e:\n print('Error', e)\n return Response(response=dumps({'result': 6}), status=200, mimetype\n ='application/json')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return Response(response=dumps({'msg': 'App successfull'}), status=200,\n mimetype='application/json')\n\n\[email protected]('/spamapi/', methods=['GET', 'POST'])\ndef apicall():\n try:\n predTxt = loads(request.data)\n predTxt = predTxt['input']\n response = spam.predict_data(predTxt)\n return Response(response=dumps(response), status=200, mimetype=\n 'application/json')\n except Exception as e:\n print('Error', e)\n return Response(response=dumps({'result': 6}), status=200, mimetype\n ='application/json')\n\n\nif __name__ == '__main__':\n app.run(host='192.168.2.240', port=5000, debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nspam = SpamIdentify()\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return Response(response=dumps({'msg': 'App successfull'}), status=200,\n mimetype='application/json')\n\n\[email protected]('/spamapi/', methods=['GET', 'POST'])\ndef apicall():\n try:\n predTxt = loads(request.data)\n predTxt = predTxt['input']\n response = spam.predict_data(predTxt)\n return Response(response=dumps(response), status=200, mimetype=\n 'application/json')\n except Exception as e:\n print('Error', e)\n return Response(response=dumps({'result': 6}), status=200, mimetype\n ='application/json')\n\n\nif __name__ == '__main__':\n app.run(host='192.168.2.240', port=5000, debug=True)\n",
"step-4": "from flask import Flask, request, Response\nfrom spamapp.spam import SpamIdentify\nfrom json import dumps, loads\napp = Flask(__name__)\nspam = SpamIdentify()\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return Response(response=dumps({'msg': 'App successfull'}), status=200,\n mimetype='application/json')\n\n\[email protected]('/spamapi/', methods=['GET', 'POST'])\ndef apicall():\n try:\n predTxt = loads(request.data)\n predTxt = predTxt['input']\n response = spam.predict_data(predTxt)\n return Response(response=dumps(response), status=200, mimetype=\n 'application/json')\n except Exception as e:\n print('Error', e)\n return Response(response=dumps({'result': 6}), status=200, mimetype\n ='application/json')\n\n\nif __name__ == '__main__':\n app.run(host='192.168.2.240', port=5000, debug=True)\n",
"step-5": "from flask import Flask,request,Response\nfrom spamapp.spam import SpamIdentify\nfrom json import dumps,loads\napp = Flask(__name__)\n\nspam = SpamIdentify()\n\[email protected](\"/\",methods=['GET'])\ndef home():\n return Response(response=dumps({\"msg\":\"App successfull\"}), status=200, mimetype='application/json')\n\[email protected](\"/spamapi/\",methods=['GET','POST'])\ndef apicall():\n try:\n predTxt = loads(request.data)\n predTxt = predTxt['input']\n response = spam.predict_data(predTxt)\n return Response(response=dumps(response), status=200, mimetype='application/json')\n except Exception as e:\n print(\"Error\",e)\n return Response(response=dumps({\"result\": 6}), status=200, mimetype='application/json')\n\nif __name__ == \"__main__\":\n app.run(\n host=\"192.168.2.240\", \n port=5000,\n debug=True\n )",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
N = int(input())
K = int(input())
xs = list(map(int, input().split()))
dist = 0
for x in xs:
dist += min(x, K - x)
print(dist * 2)
|
normal
|
{
"blob_id": "a65ab0faf08c13f007a132fb92f358a35834fdb7",
"index": 2556,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in xs:\n dist += min(x, K - x)\nprint(dist * 2)\n",
"step-3": "N = int(input())\nK = int(input())\nxs = list(map(int, input().split()))\ndist = 0\nfor x in xs:\n dist += min(x, K - x)\nprint(dist * 2)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def test(d_iter):
from cqlengine import columns
from cqlengine.models import Model
from cqlengine.query import ModelQuerySet
from cqlengine import connection
from cqlengine.management import sync_table
from urllib2 import urlopen, Request
from pyspark.sql import SQLContext
import json
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import operator
from sets import Set
CASSANDRA_KEYSPACE = "playground"
class table3_timeline(Model):
link_id = columns.Text(primary_key=True)
counts = columns.Integer()
time = columns.Integer(primary_key=True, partition_key=False)
class table3_comments(Model):
link_id = columns.Text()
author = columns.Text()
body = columns.Text()
created_utc = columns.Text()
parent_id = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
name = columns.Text(primary_key=True)
score = columns.Integer(index = True)
class table3_links(Model):
link_id = columns.Text(primary_key=True)
title = columns.Text()
permalink = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
selftext = columns.Text()
created = columns.Integer()
score = columns.Integer()
url = columns.Text()
top_comment = columns.Text()
top_score = columns.Integer()
connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)
cluster = Cluster(['54.193.123.92'])
session = cluster.connect(CASSANDRA_KEYSPACE)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
for d in d_iter:
table3_comments.create(**d)
input = {}
createdtime = 0
obj = table3_links.objects(link_id=d['link_id'])
cql = "SELECT top_score, created FROM table3_links WHERE link_id='"+d['link_id']+"'"
stmt = session.execute(cql)
current = []
for repo in stmt:
current.append(repo)
if len(current) > 0:
createdtime = current[0][1]
if int(current[0][0]) < int(d['score']):
obj.update(top_comment = d['name'])
obj.update(top_score = d['score'])
else:
source = "http://www.reddit.com/by_id/"+d['link_id']+"/.json"
request = Request(source)
response = urlopen(request)
data = json.loads(response.read())
input['title'] = data['data']['children'][0]['data']['title']
input['permalink'] = data['data']['children'][0]['data']['permalink']
input['subreddit'] = data['data']['children'][0]['data']['subreddit']
input['selftext'] = data['data']['children'][0]['data']['selftext']
input['subreddit_id'] = data['data']['children'][0]['data']['subreddit_id']
input['created'] = int(data['data']['children'][0]['data']['created'])
createdtime = input['created']
input['url'] = data['data']['children'][0]['data']['url']
input['score'] = data['data']['children'][0]['data']['score']
table3_links.create( link_id = d['link_id'],
title = input['title'],
permalink = input['permalink'],
subreddit = input['subreddit'],
selftext = input['selftext'],
subreddit_id = input['subreddit_id'],
created = input['created'],
url = input['url'],
score = input['score'],
top_comment = d['name'],
top_score = d['score'])
table3_timeline.create(link_id=d['link_id'], time=0, counts=0)
timegap = int(abs(int(d['created_utc']) - createdtime)/3600) # one hour
cql2 = "SELECT counts FROM table3_timeline WHERE link_id='"+d['link_id']+"' AND time=" + str(timegap)
stmt = session.execute(cql2)
count_tmp = []
for rep in stmt:
count_tmp.append(rep)
if len(count_tmp) > 0:
timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)
timeslot.update(counts=(count_tmp[0][0]+1))
else:
table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
df = sqlContext.read.json("s3n://yy-data/testJSON.json")
# s3n://reddit-comments/2007/RC_2007-10
rdd = df.map(lambda x: {"link_id": x.link_id,
"author": x.author,
"body": x.body,
"created_utc": x.created_utc,
"parent_id": x.parent_id,
"subreddit": x.subreddit,
"subreddit_id": x.subreddit_id,
"name": x.name,
"score": x.score})
test([])
rdd.foreachPartition(test)
|
normal
|
{
"blob_id": "11f29508d52e856f4751a5dc8911a1f1c9832374",
"index": 944,
"step-1": "<mask token>\n",
"step-2": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n 
obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n timegap)\n timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\n<mask token>\n",
"step-3": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n 
obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n timegap)\n timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\n<mask token>\ntest([])\nrdd.foreachPartition(test)\n",
"step-4": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n 
obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n timegap)\n timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\ndf = sqlContext.read.json('s3n://yy-data/testJSON.json')\nrdd = df.map(lambda x: {'link_id': x.link_id, 'author': x.author, 'body': x\n .body, 'created_utc': x.created_utc, 'parent_id': 
x.parent_id,\n 'subreddit': x.subreddit, 'subreddit_id': x.subreddit_id, 'name': x.\n name, 'score': x.score})\ntest([])\nrdd.foreachPartition(test)\n",
"step-5": "def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n\n CASSANDRA_KEYSPACE = \"playground\"\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index = True)\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = \"SELECT top_score, created FROM table3_links WHERE link_id='\"+d['link_id']+\"'\"\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment = d['name'])\n 
obj.update(top_score = d['score'])\n else:\n source = \"http://www.reddit.com/by_id/\"+d['link_id']+\"/.json\"\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data']['permalink']\n input['subreddit'] = data['data']['children'][0]['data']['subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data']['subreddit_id'] \n input['created'] = int(data['data']['children'][0]['data']['created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create( link_id = d['link_id'],\n title = input['title'],\n permalink = input['permalink'],\n subreddit = input['subreddit'],\n selftext = input['selftext'],\n subreddit_id = input['subreddit_id'],\n created = input['created'],\n url = input['url'],\n score = input['score'],\n top_comment = d['name'],\n top_score = d['score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime)/3600) # one hour\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\"+d['link_id']+\"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)\n timeslot.update(counts=(count_tmp[0][0]+1))\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\ndf = sqlContext.read.json(\"s3n://yy-data/testJSON.json\")\n# s3n://reddit-comments/2007/RC_2007-10\nrdd = df.map(lambda x: {\"link_id\": x.link_id, \n \"author\": x.author,\n \"body\": x.body,\n 
\"created_utc\": x.created_utc,\n \"parent_id\": x.parent_id,\n \"subreddit\": x.subreddit,\n \"subreddit_id\": x.subreddit_id,\n \"name\": x.name,\n \"score\": x.score})\ntest([])\nrdd.foreachPartition(test)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from sklearn import metrics
import pandas as pd
import numpy as np
from submission import submission
import argparse
import glob
def calc_auc(subm):
preds=subm['target'].values
labels=subm['labels'].values
if len(set(labels))==1:
print('warning calc_auc with single label dataset, return 0')
return 0
return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
df_submission = df[['image_name', 'target']]
df_submission.to_csv(name, index=False)
if do_submit:
name_with_quotes='\"'+name+'\"'
os.system(f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}')
def main(nfolds, work_dir):
val_avg_tta_le_auc=None
val_avg_tta_auc = None
tta_type='tta_'
for le in ['', 'le']:
for m_type in ['', tta_type]:
a = []
for fold in range(nfolds):
if len(le)>0:
name = f'val_le_{fold}_single_model_{m_type}submission.csv'
else:
name = f'val_{fold}_single_model_{m_type}submission.csv'
filename=os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
print(f'{le}_val_single_model_{m_type}metrics={a}')
print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
if m_type==tta_type:
if le=='le':
val_avg_tta_le_auc=np.mean(a)
else:
val_avg_tta_auc=np.mean(a)
for le in ['', 'le']:
for m_type in ['', 'tta_']:
a = []
subs = []
for fold in range(nfolds):
if le=='':
name = f'test_{fold}_single_model_{m_type}submission.csv'
else:
name = f'test_{le}_{fold}_single_model_{m_type}submission.csv'
filename=os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
save_submission(sub, os.path.join(work_dir, 'kaggle_' + name))
subs.append(sub)
if subs:
avg_sub = submission.aggregate_submissions(subs)
auc_avg_sub=calc_auc(avg_sub)
save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' + f'test_{le}_{m_type}.csv'))
else:
auc_avg_sub=None
print(f'{le}_test_single_model_{m_type}metrics={a}')
print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
return val_avg_tta_le_auc, val_avg_tta_auc
parser = argparse.ArgumentParser()
parser.add_argument('--work_dir',type=str)
parser.add_argument('--folds',type=int, default=0)
if __name__=="__main__":
args=parser.parse_args()
if args.folds==0:
nfolds = len(glob.glob(os.path.join(args.work_dir,'loss*.png')))
print(f' --folds not specified, will use {nfolds}')
else:
nfolds=args.folds
main(nfolds,args.work_dir)
|
normal
|
{
"blob_id": "fe0b21deb2e48ad74449b264265729cb328090ea",
"index": 6380,
"step-1": "<mask token>\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n 
print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n 
print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\n<mask token>\nparser.add_argument('--work_dir', type=str)\nparser.add_argument('--folds', type=int, default=0)\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.folds == 0:\n nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds = args.folds\n main(nfolds, args.work_dir)\n",
"step-3": "<mask token>\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n 
print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--work_dir', type=str)\nparser.add_argument('--folds', type=int, default=0)\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.folds == 0:\n nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds = args.folds\n main(nfolds, args.work_dir)\n",
"step-4": "import os\nfrom sklearn import metrics\nimport pandas as pd\nimport numpy as np\nfrom submission import submission\nimport argparse\nimport glob\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = 
calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--work_dir', type=str)\nparser.add_argument('--folds', type=int, default=0)\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.folds == 0:\n nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds = args.folds\n main(nfolds, args.work_dir)\n",
"step-5": "import os\nfrom sklearn import metrics\nimport pandas as pd\nimport numpy as np\nfrom submission import submission\nimport argparse\nimport glob\n\n\n\ndef calc_auc(subm):\n preds=subm['target'].values\n labels=subm['labels'].values\n if len(set(labels))==1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes='\\\"'+name+'\\\"'\n os.system(f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}')\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc=None\n val_avg_tta_auc = None\n tta_type='tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n\n for fold in range(nfolds):\n if len(le)>0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename=os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type==tta_type:\n if le=='le':\n val_avg_tta_le_auc=np.mean(a)\n else:\n val_avg_tta_auc=np.mean(a)\n\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le=='':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n filename=os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' + name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub=calc_auc(avg_sub)\n save_submission(avg_sub, 
os.path.join(work_dir, 'kaggle_' + f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub=None\n\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--work_dir',type=str)\nparser.add_argument('--folds',type=int, default=0)\n\n\nif __name__==\"__main__\":\n\n args=parser.parse_args()\n\n if args.folds==0:\n nfolds = len(glob.glob(os.path.join(args.work_dir,'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds=args.folds\n\n main(nfolds,args.work_dir)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from .dla import get_network as get_dla
from lib.utils.tless import tless_config
# Registry mapping an architecture family name (the part of cfg.network
# before the first '_') to its network factory function.
_network_factory = {'dla': get_dla}
def get_network(cfg):
    """Build the detection network described by *cfg*.

    ``cfg.network`` is either a bare family name (e.g. ``'dla'``) or
    ``'<family>_<layers>'`` (e.g. ``'dla_34'``); the layer count defaults
    to 0 when absent.
    """
    arch = cfg.network
    if '_' in arch:
        family, _, depth = arch.partition('_')
        num_layers = int(depth)
    else:
        family, num_layers = arch, 0
    builder = _network_factory[family]
    return builder(num_layers, cfg.heads, cfg.head_conv,
                   tless_config.down_ratio, cfg.det_dir)
|
normal
|
{
"blob_id": "7df94c86ff837acf0f2a78fe1f99919c31bdcb9b",
"index": 4881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_network(cfg):\n arch = cfg.network\n heads = cfg.heads\n head_conv = cfg.head_conv\n num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0\n arch = arch[:arch.find('_')] if '_' in arch else arch\n get_model = _network_factory[arch]\n network = get_model(num_layers, heads, head_conv, tless_config.\n down_ratio, cfg.det_dir)\n return network\n",
"step-3": "<mask token>\n_network_factory = {'dla': get_dla}\n\n\ndef get_network(cfg):\n arch = cfg.network\n heads = cfg.heads\n head_conv = cfg.head_conv\n num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0\n arch = arch[:arch.find('_')] if '_' in arch else arch\n get_model = _network_factory[arch]\n network = get_model(num_layers, heads, head_conv, tless_config.\n down_ratio, cfg.det_dir)\n return network\n",
"step-4": "from .dla import get_network as get_dla\nfrom lib.utils.tless import tless_config\n_network_factory = {'dla': get_dla}\n\n\ndef get_network(cfg):\n arch = cfg.network\n heads = cfg.heads\n head_conv = cfg.head_conv\n num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0\n arch = arch[:arch.find('_')] if '_' in arch else arch\n get_model = _network_factory[arch]\n network = get_model(num_layers, heads, head_conv, tless_config.\n down_ratio, cfg.det_dir)\n return network\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""Secret Garden tests."""
from secret_garden import Decoder, SecretGarden
import random
# Shared test fixtures: both objects under test read the same example
# data file with the same cipher key.
filename = "pr08_example_data.txt"
key = "Fat Chocobo"
d = Decoder(filename, key)  # low-level decoding API under test
s = SecretGarden(filename, key)  # high-level API under test
def test_read_from_file():
    """Reading the data file must yield 7 clean lines as a list."""
    code_lines = d.read_code_from_file()
    assert type(code_lines) == list
    assert len(code_lines) == 7
    # Membership check on the list: no element may be a bare newline.
    assert "\n" not in d.read_code_from_file()
def test_decode_from_base64():
    """Each base64 line must decode to the expected utf-8 payload."""
    expected = [")-.7)-\x06\x06AOO", "-57)-0\x06\x06JASJAOOASJ",
                ")07)2\x06\x06AJSAJAJOAJJAAO", ".7)/\x06\x06AJSSAJSJOOSSOOOS",
                "-,70\x06\x06", ",7)-,\x06\x06OAASSOSOAAASAAAAA",
                ".7).\x06\x06SOSAOJAOOO"]
    decoded = [d.decode_from_base64(line) for line in d.read_code_from_file()]
    assert decoded == expected
def test_calculate_cipher_step():
    """The cipher step must be a key-dependent integer with a known value.

    :return:
    """
    given_value = d.calculate_cipher_step()
    assert type(given_value) == int
    assert given_value == 1016
    # A different key must produce a different step.
    new_decoder = Decoder(filename, "HELLO THERE!")
    new_value = new_decoder.calculate_cipher_step()
    assert new_value != given_value
    # BUG FIX: the original compared the step against a random.Random
    # *instance* (an int is never equal to a Random object, so the assert
    # was vacuous). Compare against an actual random float instead: it lies
    # in [0, 1) and therefore can never equal the integer step 1016.
    random_number = random.random()
    assert given_value != random_number
def test_decode():
    """Decoding must yield 7 messages with the expected endpoints."""
    messages = d.decode()
    assert type(messages) == list
    assert len(messages) == 7
    assert messages[0] == '-12;-1\n\nESS'
    assert messages[-1] == '2;-2\n\nWSWESNESSS'
    # Every decoded message embeds at least one newline separator.
    assert all("\n" in message for message in messages)
def test_decode_messages():
    """SecretGarden.decode_messages must match Decoder.decode for the same key."""
    via_decoder = d.decode()
    via_garden = s.decode_messages()
    assert via_decoder == via_garden
    # A different key must produce a different decoding.
    other_key_result = SecretGarden(filename, "HELLO, STUDENTS.").decode_messages()
    assert via_decoder != other_key_result
def test_find_secret_locations():
    """The garden must report the 7 known secret coordinate pairs."""
    junk_pairs = [(random.Random(), random.Random()) for _ in range(7)]
    expected = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]
    locations = s.find_secret_locations()
    assert type(locations) == list
    for location in locations:
        assert type(location) == tuple
    assert locations == expected
    assert junk_pairs != locations
    assert len(junk_pairs) == len(locations)
|
normal
|
{
"blob_id": "8cfab525ab3a86dd6964475d5621fdc7c6413e38",
"index": 8019,
"step-1": "<mask token>\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\n<mask token>\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n 
assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-2": "<mask token>\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n 
\"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-3": "<mask token>\nfilename = 'pr08_example_data.txt'\nkey = 'Fat Chocobo'\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef 
test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-4": "<mask token>\nfrom secret_garden import Decoder, SecretGarden\nimport random\nfilename = 'pr08_example_data.txt'\nkey = 'Fat Chocobo'\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert '\\n' not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [')-.7)-\\x06\\x06AOO', '-57)-0\\x06\\x06JASJAOOASJ',\n ')07)2\\x06\\x06AJSAJAJOAJJAAO', '.7)/\\x06\\x06AJSSAJSJOOSSOOOS',\n '-,70\\x06\\x06', ',7)-,\\x06\\x06OAASSOSOAAASAAAAA',\n '.7).\\x06\\x06SOSAOJAOOO']\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, 'HELLO THERE!')\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert '\\n' in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, 'HELLO, 
STUDENTS.').decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(),\n random.Random()), (random.Random(), random.Random()), (random.\n Random(), random.Random()), (random.Random(), random.Random()), (\n random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, \n -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-5": "\"\"\"Secret Garden tests.\"\"\"\nfrom secret_garden import Decoder, SecretGarden\nimport random\n\nfilename = \"pr08_example_data.txt\"\nkey = \"Fat Chocobo\"\nd = Decoder(filename, key)\ns = SecretGarden(filename, key)\n\n\ndef test_read_from_file():\n \"\"\"\n Test of function of reading data from file.\n\n :return:\n \"\"\"\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()\n\n\ndef test_decode_from_base64():\n \"\"\"\n Test of function of decoding messages from base64 to utf-8.\n\n :return:\n \"\"\"\n list_to_be_checked = []\n list_of_truth = [\")-.7)-\u0006\u0006AOO\", \"-57)-0\u0006\u0006JASJAOOASJ\", \")07)2\u0006\u0006AJSAJAJOAJJAAO\", \".7)/\u0006\u0006AJSSAJSJOOSSOOOS\",\n \"-,70\u0006\u0006\", \",7)-,\u0006\u0006OAASSOSOAAASAAAAA\", \".7).\u0006\u0006SOSAOJAOOO\"]\n for x in d.read_code_from_file():\n list_to_be_checked.append(d.decode_from_base64(x))\n assert list_to_be_checked == list_of_truth\n\n\ndef test_calculate_cipher_step():\n \"\"\"\n Test of function of calculating the cipher step.\n\n :return:\n \"\"\"\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number\n\n\ndef test_decode():\n \"\"\"\n Test of function of decoding.\n\n :return:\n \"\"\"\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x\n\n\ndef test_decode_messages():\n \"\"\"\n Test of function of decoding messages in SecretGarden class.\n\n :return:\n \"\"\"\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n 
decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3\n\n\ndef test_find_secret_locations():\n \"\"\"\n Test of function of finding secret locations in SecretGarden class.\n\n :return:\n \"\"\"\n list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),\n random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import math
import pendulum
from none import *
@on_command('yearprogress')
async def year_progress(session: CommandSession):
    """Reply to the 'yearprogress' command with the current-year report."""
    # NOTE(review): this handler's module-level name is shadowed by the
    # year_progress(dt) helper defined below; the decorator registration
    # still holds — confirm that is intended.
    report = get_year_progress()
    await session.send(report)
def get_year_progress():
    """Build the user-facing progress message for the current year."""
    now = pendulum.now()
    percent = year_progress(now)
    bar = make_progress_string(percent)
    # One trailing newline from the header plus two blank-line separators.
    return f'你的 {now.year} 使用进度:{percent}%\n\n\n{bar}'
def year_progress(dt):
    """Return the floored percentage of the year elapsed at *dt*.

    *dt* must expose ``is_leap_year()`` and ``timetuple()`` (pendulum
    datetimes do).
    """
    days_in_year = 366 if dt.is_leap_year() else 365
    elapsed_days = dt.timetuple().tm_yday
    return math.floor((elapsed_days / days_in_year) * 100)
def make_progress_string(percent):
    """Render *percent* (0-100) as a 15-character block progress bar."""
    total_blocks = 15
    filled_threshold = percent * total_blocks / 100
    chars = []
    for position in range(total_blocks):
        chars.append("▓" if position < filled_threshold else "░")
    return ''.join(chars)
|
normal
|
{
"blob_id": "f54d0eeffa140af9c16a1fedb8dcd7d06ced29f2",
"index": 2395,
"step-1": "<mask token>\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([('▓' if i < percent else '░') for i in range(blocks)])\n",
"step-3": "<mask token>\n\n\n@on_command('yearprogress')\nasync def year_progress(session: CommandSession):\n await session.send(get_year_progress())\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([('▓' if i < percent else '░') for i in range(blocks)])\n",
"step-4": "import math\nimport pendulum\nfrom none import *\n\n\n@on_command('yearprogress')\nasync def year_progress(session: CommandSession):\n await session.send(get_year_progress())\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([('▓' if i < percent else '░') for i in range(blocks)])\n",
"step-5": "import math\n\nimport pendulum\nfrom none import *\n\n\n@on_command('yearprogress')\nasync def year_progress(session: CommandSession):\n await session.send(get_year_progress())\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n' \\\n f'\\n\\n' \\\n f'{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor((passed_days / year_days) * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([\"▓\" if i < percent else \"░\" for i in range(blocks)])\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import json
import glob
import sys
# First CLI argument names the search area whose shard predictions we merge,
# e.g. "slovenia_177sqkm_shards/.../predict_slovenia_177sqkm_shard".
searchAreaName = sys.argv[1]
shard_pattern = './{0}_??.txt'.format(searchAreaName)
print(shard_pattern)
all_predicts = glob.glob(shard_pattern)
def getBboxes(bboxes):
    """Keep only boxes whose coordinate sum is positive.

    An all-zero box is the "no detection" placeholder and is dropped.
    """
    kept = []
    for bb in bboxes:
        if sum(bb) > 0.0:
            kept.append(bb)
    return kept
print(all_predicts)

# Merge every shard's prediction file into one {tile_key: bboxes} mapping.
bboxes = {}
for shard_file in all_predicts:
    with open(shard_file) as json_data:
        shard = json.load(json_data)
    # NOTE(review): only the FIRST bbox is inspected here; a tile whose
    # first box is all-zero is skipped even if later boxes are non-zero —
    # confirm that matches the predictor's output convention.
    for key, record in shard["outputs"].items():
        candidates = record["bbox-list"]
        if sum(candidates[0]) > 0.0:
            bboxes[key] = getBboxes(candidates)

summary_path = '{0}_summary.json'.format(searchAreaName)
with open(summary_path, 'w') as fp:
    json.dump(bboxes, fp, indent=2)
    print("wrote to {0}_summary.json".format(searchAreaName))
|
normal
|
{
"blob_id": "8f9d823785d42d02a0a3d901d66b46a5cd59cdd7",
"index": 7465,
"step-1": "<mask token>\n\n\ndef getBboxes(bboxes):\n return [bb for bb in bboxes if sum(bb) > 0.0]\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('./{0}_??.txt'.format(searchAreaName))\n<mask token>\n\n\ndef getBboxes(bboxes):\n return [bb for bb in bboxes if sum(bb) > 0.0]\n\n\nprint(all_predicts)\n<mask token>\nfor f in all_predicts:\n with open(f) as json_data:\n data = json.load(json_data)\n outputs = data['outputs']\n for key in outputs:\n val = outputs[key]['bbox-list']\n if sum(val[0]) > 0.0:\n bboxes[key] = getBboxes(val)\nwith open('{0}_summary.json'.format(searchAreaName), 'w') as fp:\n json.dump(bboxes, fp, indent=2)\n print('wrote to {0}_summary.json'.format(searchAreaName))\n",
"step-3": "<mask token>\nsearchAreaName = sys.argv[1]\nprint('./{0}_??.txt'.format(searchAreaName))\nall_predicts = glob.glob('./{0}_??.txt'.format(searchAreaName))\n\n\ndef getBboxes(bboxes):\n return [bb for bb in bboxes if sum(bb) > 0.0]\n\n\nprint(all_predicts)\nbboxes = {}\nfor f in all_predicts:\n with open(f) as json_data:\n data = json.load(json_data)\n outputs = data['outputs']\n for key in outputs:\n val = outputs[key]['bbox-list']\n if sum(val[0]) > 0.0:\n bboxes[key] = getBboxes(val)\nwith open('{0}_summary.json'.format(searchAreaName), 'w') as fp:\n json.dump(bboxes, fp, indent=2)\n print('wrote to {0}_summary.json'.format(searchAreaName))\n",
"step-4": "import json\nimport glob\nimport sys\nsearchAreaName = sys.argv[1]\nprint('./{0}_??.txt'.format(searchAreaName))\nall_predicts = glob.glob('./{0}_??.txt'.format(searchAreaName))\n\n\ndef getBboxes(bboxes):\n return [bb for bb in bboxes if sum(bb) > 0.0]\n\n\nprint(all_predicts)\nbboxes = {}\nfor f in all_predicts:\n with open(f) as json_data:\n data = json.load(json_data)\n outputs = data['outputs']\n for key in outputs:\n val = outputs[key]['bbox-list']\n if sum(val[0]) > 0.0:\n bboxes[key] = getBboxes(val)\nwith open('{0}_summary.json'.format(searchAreaName), 'w') as fp:\n json.dump(bboxes, fp, indent=2)\n print('wrote to {0}_summary.json'.format(searchAreaName))\n",
"step-5": "import json\nimport glob\nimport sys\nsearchAreaName = sys.argv[1]\n# searchAreaName = \"slovenia_177sqkm_shards/20161220-162010-c9e0/slovenia_177sqkm_predicted/predict_slovenia_177sqkm_shard\"\nprint('./{0}_??.txt'.format(searchAreaName))\nall_predicts = glob.glob('./{0}_??.txt'.format(searchAreaName))\n\ndef getBboxes(bboxes):\n return [bb for bb in bboxes if sum(bb) > 0.0]\nprint(all_predicts)\nbboxes = {}\nfor f in all_predicts:\n with open(f) as json_data:\n data = json.load(json_data)\n\n outputs = data[\"outputs\"]\n\n for key in outputs:\n\n val = outputs[key][\"bbox-list\"]\n if sum(val[0]) > 0.0:\n bboxes[key] = getBboxes(val)\n #print outputs\n\nwith open('{0}_summary.json'.format(searchAreaName), 'w') as fp:\n json.dump(bboxes, fp, indent=2)\n print(\"wrote to {0}_summary.json\".format(searchAreaName))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Constants to be used throughout this program
stored here.
"""
ROOT_URL = "https://api.twitter.com"
UPLOAD_URL = "https://upload.twitter.com"
REQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'
AUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'
ACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'
VERSION = '1.1'
USER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'
FRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'
FRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'
FRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'
FOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'
TWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'
TWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'
TWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'
RETWEET_URL = ROOT_URL + "/" + VERSION + "/retweet/create/{tweet_id}.json"
REMOVE_RETWEET_URL = ROOT_URL + "/" + \
VERSION + "/unretweet/create/{tweet_id}.json"
FAVOURITED_TWEETS_URL = ROOT_URL + "/" + VERSION + "/favorites/list.json"
STATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'
MEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'
TRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'
|
normal
|
{
"blob_id": "c907f6b954aa3eae21a54eba9d54c116576bd40a",
"index": 5848,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nROOT_URL = 'https://api.twitter.com'\nUPLOAD_URL = 'https://upload.twitter.com'\nREQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'\nAUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'\nACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'\nVERSION = '1.1'\nUSER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'\nFRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'\nFRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'\nFRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'\nFOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'\nTWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'\nTWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'\nTWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'\nRETWEET_URL = ROOT_URL + '/' + VERSION + '/retweet/create/{tweet_id}.json'\nREMOVE_RETWEET_URL = (ROOT_URL + '/' + VERSION +\n '/unretweet/create/{tweet_id}.json')\nFAVOURITED_TWEETS_URL = ROOT_URL + '/' + VERSION + '/favorites/list.json'\nSTATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'\nMEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'\nTRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'\n",
"step-3": "\"\"\"\nConstants to be used throughout this program\nstored here.\n\"\"\"\nROOT_URL = \"https://api.twitter.com\"\nUPLOAD_URL = \"https://upload.twitter.com\"\n\nREQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'\nAUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'\nACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'\n\nVERSION = '1.1'\n\nUSER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'\nFRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'\nFRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'\nFRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'\nFOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'\n\nTWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'\nTWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'\nTWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'\nRETWEET_URL = ROOT_URL + \"/\" + VERSION + \"/retweet/create/{tweet_id}.json\"\nREMOVE_RETWEET_URL = ROOT_URL + \"/\" + \\\n VERSION + \"/unretweet/create/{tweet_id}.json\"\nFAVOURITED_TWEETS_URL = ROOT_URL + \"/\" + VERSION + \"/favorites/list.json\"\n\nSTATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'\nMEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'\n\nTRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Unsolved:Didn't try coz of this warning:
# If you use Python, then submit solutions on PyPy. Try to write an efficient solution.
# For each test case: count, with multiplicity, the array elements that equal
# the sum of some contiguous subarray of length >= 2.
from sys import stdin
from collections import defaultdict
t = int(input())  # number of test cases
for _ in range(t):
    n = int(input())  # array length
    arr = list(map(int, stdin.readline().strip().split()))
    d = defaultdict(int) # frequency of elements in array
    maxnum = 0
    for num in arr:
        d[num] += 1
        if num>maxnum: maxnum = num
    # Values already credited, so each distinct sum is counted only once.
    special = set()
    ans = 0
    # Enumerate every contiguous subarray sum ssf = arr[i..j], j > i.
    for i in range(n-1):
        ssf = arr[i]
        for j in range(i+1, n):
            ssf += arr[j]
            # Once the running sum exceeds the array maximum it can never
            # match an element again -- assumes all inputs are positive so
            # sums only grow (TODO confirm against the problem statement).
            if ssf>maxnum:break # TLE without this condition
            if d[ssf] and ssf not in special:
                special.add(ssf)
                ans += d[ssf]
    print(ans)
|
normal
|
{
"blob_id": "789f098fe9186d2fbda5417e9938930c44761b83",
"index": 6760,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(t):\n n = int(input())\n arr = list(map(int, stdin.readline().strip().split()))\n d = defaultdict(int)\n maxnum = 0\n for num in arr:\n d[num] += 1\n if num > maxnum:\n maxnum = num\n special = set()\n ans = 0\n for i in range(n - 1):\n ssf = arr[i]\n for j in range(i + 1, n):\n ssf += arr[j]\n if ssf > maxnum:\n break\n if d[ssf] and ssf not in special:\n special.add(ssf)\n ans += d[ssf]\n print(ans)\n",
"step-3": "<mask token>\nt = int(input())\nfor _ in range(t):\n n = int(input())\n arr = list(map(int, stdin.readline().strip().split()))\n d = defaultdict(int)\n maxnum = 0\n for num in arr:\n d[num] += 1\n if num > maxnum:\n maxnum = num\n special = set()\n ans = 0\n for i in range(n - 1):\n ssf = arr[i]\n for j in range(i + 1, n):\n ssf += arr[j]\n if ssf > maxnum:\n break\n if d[ssf] and ssf not in special:\n special.add(ssf)\n ans += d[ssf]\n print(ans)\n",
"step-4": "from sys import stdin\nfrom collections import defaultdict\nt = int(input())\nfor _ in range(t):\n n = int(input())\n arr = list(map(int, stdin.readline().strip().split()))\n d = defaultdict(int)\n maxnum = 0\n for num in arr:\n d[num] += 1\n if num > maxnum:\n maxnum = num\n special = set()\n ans = 0\n for i in range(n - 1):\n ssf = arr[i]\n for j in range(i + 1, n):\n ssf += arr[j]\n if ssf > maxnum:\n break\n if d[ssf] and ssf not in special:\n special.add(ssf)\n ans += d[ssf]\n print(ans)\n",
"step-5": "# Unsolved:Didn't try coz of this warning:\r\n# If you use Python, then submit solutions on PyPy. Try to write an efficient solution.\r\nfrom sys import stdin\r\nfrom collections import defaultdict\r\nt = int(input())\r\nfor _ in range(t):\r\n n = int(input())\r\n arr = list(map(int, stdin.readline().strip().split()))\r\n\r\n d = defaultdict(int) # frequency of elements in array\r\n maxnum = 0\r\n for num in arr:\r\n d[num] += 1\r\n if num>maxnum: maxnum = num\r\n \r\n special = set()\r\n ans = 0\r\n for i in range(n-1):\r\n ssf = arr[i]\r\n for j in range(i+1, n):\r\n ssf += arr[j]\r\n if ssf>maxnum:break # TLE without this condition\r\n if d[ssf] and ssf not in special:\r\n special.add(ssf)\r\n ans += d[ssf]\r\n print(ans)\r\n \r\n\r\n \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Caesar-cipher a string by a user-supplied key (lowercase letters only)."""
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
# Convert the ALPHABET to list (kept for compatibility with the original API).
ALPHABET = [i for i in ALPHABET]


def encode(text, key):
    """Shift each lowercase letter of *text* forward by *key* positions.

    The shift wraps around the alphabet (modulo 26).  Characters outside
    a-z (spaces, digits, uppercase, punctuation) pass through unchanged.
    """
    out = []
    for letter in text:
        # BUG FIX: the original tested `letter in input_string`, which is
        # always true, so any non-lowercase character crashed with a
        # ValueError in ALPHABET.index().  Test membership in ALPHABET.
        if letter in ALPHABET:
            # index + key, wrapped with % 26 so e.g. 'x' + 10 cycles back
            # to the start of the alphabet.
            out.append(ALPHABET[(ALPHABET.index(letter) + key) % 26])
        else:
            out.append(letter)
    return ''.join(out)


if __name__ == '__main__':
    input_string = input('Enter a String : ')
    key = int(input('Enter the key: '))
    print(f'Encoded String is {encode(input_string, key)}')
|
normal
|
{
"blob_id": "b2db622596d0dff970e44759d25360a62f5fea83",
"index": 4725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor letter in input_string:\n if letter in input_string:\n output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]\n else:\n output_string += letter\nprint(f'Encoded String is {output_string}')\n",
"step-3": "ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\nALPHABET = [i for i in ALPHABET]\noutput_string = ''\ninput_string = input('Enter a String : ')\nkey = int(input('Enter the key: '))\nfor letter in input_string:\n if letter in input_string:\n output_string += ALPHABET[(ALPHABET.index(letter) + key) % 26]\n else:\n output_string += letter\nprint(f'Encoded String is {output_string}')\n",
"step-4": "ALPHABET = 'abcdefghijklmnopqrstuvwxyz'\n# Convert the ALPHABET to list\nALPHABET = [i for i in ALPHABET]\noutput_string = ''\ninput_string = input('Enter a String : ')\n\nkey = int(input('Enter the key: '))\n\nfor letter in input_string:\n if letter in input_string:\n # ALPHABET.index(letter) returns the index of that letter in the ALPHABET list\n # then we can add the key to that index to get the letter\n # then we take the mod of that so if the letter is x and 10 it cycle back to the beginning of the list\n output_string += ALPHABET[(ALPHABET.index(letter)+key) % 26]\n else:\n output_string += letter\n\nprint(f'Encoded String is {output_string}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Solution to Codeforces problem 50A
Copyright (c) GeneralMing. All rights reserved.
https://github.com/GeneralMing/codeforces
"""
n = input().split()
n[0] = int(n[0])
n[1] = int(n[1])
print((n[0]*n[1])//2)
|
normal
|
{
"blob_id": "41a80feeb1fdc8ad783706ad261f5fc1124371d6",
"index": 8216,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(n[0] * n[1] // 2)\n",
"step-3": "<mask token>\nn = input().split()\nn[0] = int(n[0])\nn[1] = int(n[1])\nprint(n[0] * n[1] // 2)\n",
"step-4": "\"\"\"\n\tSolution to Codeforces problem 50A\n\tCopyright (c) GeneralMing. All rights reserved.\n\n\thttps://github.com/GeneralMing/codeforces\n\"\"\"\n\nn = input().split()\nn[0] = int(n[0])\nn[1] = int(n[1])\nprint((n[0]*n[1])//2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from setuptools import setup, find_packages
import sys, os

version = '0.1'

# Package metadata for the ckanext-MYEXTENSION CKAN extension.
setup(
    name='ckanext-MYEXTENSION',
    version=version,
    description="description",
    long_description='\t',
    # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[],
    keywords='',
    author='ldhspace',
    author_email='[email protected]',
    url='www.naver.com',
    license='free',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    namespace_packages=['ckanext', 'ckanext.MYEXTENSION'],
    include_package_data=True,
    zip_safe=False,
    # -*- Extra requirements: -*-
    install_requires=[],
    # Registers the USMetadataPlugin under CKAN's plugin entry-point group.
    entry_points='\n    [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t',
)
|
normal
|
{
"blob_id": "9d2c0d59b0b2b4e4fca942e648059738053c53d0",
"index": 9376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='ckanext-MYEXTENSION', version=version, description=\n 'description', long_description='\\t', classifiers=[], keywords='',\n author='ldhspace', author_email='[email protected]', url=\n 'www.naver.com', license='free', packages=find_packages(exclude=[\n 'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',\n 'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,\n install_requires=[], entry_points=\n \"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\"\n )\n",
"step-3": "<mask token>\nversion = '0.1'\nsetup(name='ckanext-MYEXTENSION', version=version, description=\n 'description', long_description='\\t', classifiers=[], keywords='',\n author='ldhspace', author_email='[email protected]', url=\n 'www.naver.com', license='free', packages=find_packages(exclude=[\n 'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',\n 'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,\n install_requires=[], entry_points=\n \"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\"\n )\n",
"step-4": "from setuptools import setup, find_packages\nimport sys, os\nversion = '0.1'\nsetup(name='ckanext-MYEXTENSION', version=version, description=\n 'description', long_description='\\t', classifiers=[], keywords='',\n author='ldhspace', author_email='[email protected]', url=\n 'www.naver.com', license='free', packages=find_packages(exclude=[\n 'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',\n 'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,\n install_requires=[], entry_points=\n \"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\"\n )\n",
"step-5": "from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1'\n\nsetup(\n\tname='ckanext-MYEXTENSION',\n\tversion=version,\n\tdescription=\"description\",\n\tlong_description=\"\"\"\\\n\t\"\"\",\n\tclassifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n\tkeywords='',\n\tauthor='ldhspace',\n\tauthor_email='[email protected]',\n\turl='www.naver.com',\n\tlicense='free',\n\tpackages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n\tnamespace_packages=['ckanext', 'ckanext.MYEXTENSION'],\n\tinclude_package_data=True,\n\tzip_safe=False,\n\tinstall_requires=[\n\t\t# -*- Extra requirements: -*-\n\t],\n\tentry_points=\\\n\t\"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\",\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#coding: utf-8
"""Exercise stub (assignments translated from Portuguese):

1) Find the proper nouns in a text and return them as a list.  Use regex
   (``import re``) and the ``findall()`` function.  In the basic version,
   return every word that starts with an uppercase letter.

2) Plot a few seconds of the accelerometer data from the dataset:
   https://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#
   Use the ``read_csv()`` function to open the files.
"""
# No implementation yet; the module is intentionally an empty stub.
if __name__ == "__main__":
    pass
|
normal
|
{
"blob_id": "d95d899c6eae5a90c90d3d920ee40b38bf304805",
"index": 532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pass\n",
"step-3": "#coding: utf-8\n\"\"\" \n1) Encontre em um texto os nomes próprios e os retorne em uma lista. Utilize o Regex (‘import re’) e a função findall(). Na versão básica, retorne todas as palavras que iniciam com maiúscula.\n\n\n2) Apresente um plot de alguns segundos dos dados de acelerômetro do dataset:\nhttps://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#\nUse a função read_csv() para abrir os arquivos\n\n\"\"\"\n\nif __name__ == \"__main__\":\n\tpass",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def Merge(left, right, merged):
    """Merge two sorted lists into ``merged`` in place and return it."""
    i, j = 0, 0
    # Take the smaller head element while both halves still have items.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged[i + j] = left[i]
            i += 1
        else:
            merged[i + j] = right[j]
            j += 1
    # Copy whichever half still has a tail (at most one loop does work).
    while i < len(left):
        merged[i + j] = left[i]
        i += 1
    while j < len(right):
        merged[i + j] = right[j]
        j += 1
    return merged


def MergeSort(array):
    """Return a sorted copy of ``array``; the input list is not modified."""
    if len(array) <= 1:
        return array
    middle = len(array) // 2
    return Merge(MergeSort(array[:middle]), MergeSort(array[middle:]),
                 array.copy())
"""
a=[2,45,1,4,66,34]
print(MergeSort(a))
print(a)
"""
|
normal
|
{
"blob_id": "c64c542b57107c06de2ce0751075a81fcb195b61",
"index": 4293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef MergeSort(array):\n if len(array) <= 1:\n return array\n mid = len(array) // 2\n left, right = MergeSort(array[:mid]), MergeSort(array[mid:])\n return Merge(left, right, array.copy())\n\n\n<mask token>\n",
"step-3": "def Merge(left, right, merged):\n left_cursor, right_cursor = 0, 0\n while left_cursor < len(left) and right_cursor < len(right):\n if left[left_cursor] <= right[right_cursor]:\n merged[left_cursor + right_cursor] = left[left_cursor]\n left_cursor += 1\n else:\n merged[left_cursor + right_cursor] = right[right_cursor]\n right_cursor += 1\n for left_cursor in range(left_cursor, len(left)):\n merged[left_cursor + right_cursor] = left[left_cursor]\n for right_cursor in range(right_cursor, len(right)):\n merged[left_cursor + right_cursor] = right[right_cursor]\n return merged\n\n\ndef MergeSort(array):\n if len(array) <= 1:\n return array\n mid = len(array) // 2\n left, right = MergeSort(array[:mid]), MergeSort(array[mid:])\n return Merge(left, right, array.copy())\n\n\n<mask token>\n",
"step-4": "def Merge (left,right,merged):\n #Ф-ция объединения и сравнения элементов массивов \n left_cursor,right_cursor=0,0\n while left_cursor<len(left) and right_cursor<len(right):\n if left[left_cursor]<=right[right_cursor]:\n merged[left_cursor+right_cursor]=left[left_cursor]\n left_cursor+=1\n else:\n merged[left_cursor+right_cursor]=right[right_cursor]\n right_cursor+=1\n for left_cursor in range(left_cursor,len(left)):\n merged[left_cursor+right_cursor]=left[left_cursor]\n for right_cursor in range(right_cursor,len(right)):\n merged[left_cursor+right_cursor]=right[right_cursor]\n return merged\n\ndef MergeSort(array):\n #Основная рекурсивная функция\n if len(array)<=1:\n return array\n mid=len(array)//2\n left,right=MergeSort(array[:mid]),MergeSort(array[mid:])\n return Merge(left,right,array.copy())\n\n\n\"\"\"\na=[2,45,1,4,66,34]\nprint(MergeSort(a))\nprint(a) \n\"\"\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import thread
import time
import ctypes
# Python 2 script (uses the `thread` module): binds to a native CAN-bus
# helper library via ctypes.
lib = ctypes.CDLL('/home/ubuntu/workspace/35SmartPy/CAN/brain/CANlib.so')
init = lib.init
read = lib.readGun
# readGun returns a pointer to an 8-byte buffer -- presumably one CAN frame;
# TODO confirm against CANlib's C header.
read.restype = ctypes.POINTER(ctypes.c_ubyte * 8)
send = lib.sendBrake
# Initialise the CAN interface as a side effect of importing this module.
init()
|
normal
|
{
"blob_id": "866571341a587c8b1b25437f5815429875bbe5ad",
"index": 9285,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ninit()\n",
"step-3": "<mask token>\nlib = ctypes.CDLL('/home/ubuntu/workspace/35SmartPy/CAN/brain/CANlib.so')\ninit = lib.init\nread = lib.readGun\nread.restype = ctypes.POINTER(ctypes.c_ubyte * 8)\nsend = lib.sendBrake\ninit()\n",
"step-4": "import thread\nimport time\nimport ctypes\nlib = ctypes.CDLL('/home/ubuntu/workspace/35SmartPy/CAN/brain/CANlib.so')\ninit = lib.init\nread = lib.readGun\nread.restype = ctypes.POINTER(ctypes.c_ubyte * 8)\nsend = lib.sendBrake\ninit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def countdown(n):
    """Return a zero-argument callable that yields n, n-1, n-2, ... on
    successive calls (it keeps going below zero if called past 0).
    """
    def step():
        nonlocal n
        n -= 1
        return n + 1
    return step
# Demo: drain the counter until it returns 0 (falsy), then stop.
a = countdown(12)
while True:
    v = a()
    if not v:
        break
|
normal
|
{
"blob_id": "01eef391f6d37d1e74cb032c5b27e1d8fc4395da",
"index": 6122,
"step-1": "<mask token>\n",
"step-2": "def countdown(n):\n\n def next():\n nonlocal n\n r = n\n n -= 1\n return r\n return next\n\n\n<mask token>\n",
"step-3": "def countdown(n):\n\n def next():\n nonlocal n\n r = n\n n -= 1\n return r\n return next\n\n\n<mask token>\nwhile True:\n v = a()\n if not v:\n break\n",
"step-4": "def countdown(n):\n\n def next():\n nonlocal n\n r = n\n n -= 1\n return r\n return next\n\n\na = countdown(12)\nwhile True:\n v = a()\n if not v:\n break\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.5 on 2021-05-30 14:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes MovementPassModel.movement_type
    # nullable and sets its FK on_delete behaviour to DO_NOTHING.
    dependencies = [
        ('fuser', '0009_movement_type'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movementpassmodel',
            name='movement_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'),
        ),
    ]
|
normal
|
{
"blob_id": "848374ea7d706bbd2ef5a76489cabeff998acb82",
"index": 6040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fuser', '0009_movement_type')]\n operations = [migrations.AlterField(model_name='movementpassmodel',\n name='movement_type', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fuser', '0009_movement_type')]\n operations = [migrations.AlterField(model_name='movementpassmodel',\n name='movement_type', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'))]\n",
"step-5": "# Generated by Django 3.1.5 on 2021-05-30 14:27\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fuser', '0009_movement_type'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='movementpassmodel',\n name='movement_type',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import struct
from coapthon import defines
from coapthon.utils import byte_len, bit_len, parse_blockwise
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class BlockwiseLayer(object):
    """
    Handles the CoAP Blockwise transfer feature (block-wise transfers,
    cf. RFC 7959).

    Per-client state lives in ``parent.blockwise``, keyed by
    ``hash(host + port + token)``.  Each value is a 5-tuple
    ``(block, byte, num, m, size)`` where ``block`` is the Block option in
    use (1 = request body, 2 = response body), ``byte`` is the payload
    offset already served (Block2 only), ``num`` the current block number,
    ``m`` the "more blocks" flag and ``size`` the block size in bytes.
    """
    def __init__(self, parent):
        """
        Initialize a Blockwise Layer.

        :type parent: coapserver.CoAP
        :param parent: the CoAP server (owns the shared ``blockwise`` dict)
        """
        self._parent = parent
    def handle_request(self, request):
        """
        Store Blockwise parameters required by clients.

        :param request: the request message
        :return: (M bit, request) -- M is False only when a Block1 transfer
            has received its final block (m == 0), True otherwise
        """
        ret = True
        for option in request.options:
            if option.number == defines.inv_options["Block2"]:
                host, port = request.source
                key = hash(str(host) + str(port) + str(request.token))
                num, m, size = parse_blockwise(option.raw_value)
                # remember choices
                if key in self._parent.blockwise:
                    block, byte, num2, m2, size2 = self._parent.blockwise[key]
                    if block == 2:
                        # Ongoing Block2 transfer: keep the byte offset,
                        # adopt the client's new num/m/size choices.
                        self._parent.blockwise[key] = (2, byte, num, m, size)
                    else:
                        self._parent.blockwise[key] = (2, 0, num, m, size)
                else:
                    self._parent.blockwise[key] = (2, 0, num, m, size)
            elif option.number == defines.inv_options["Block1"]:
                host, port = request.source
                key = hash(str(host) + str(port) + str(request.token))
                num, m, size = parse_blockwise(option.raw_value)
                # remember choices
                self._parent.blockwise[key] = (1, 0, num, m, size)
                if m == 0:
                    # Final request block received: transfer is complete,
                    # drop the state and signal completion to the caller.
                    del self._parent.blockwise[key]
                    ret = False
        return ret, request
    def start_block2(self, request):
        """
        Initialize a blockwise response. Used if payload > 1024.

        Seeds state at offset 0, block 0, m=1, with a 1024-byte block size.

        :param request: the request message
        """
        host, port = request.source
        key = hash(str(host) + str(port) + str(request.token))
        self._parent.blockwise[key] = (2, 0, 0, 1, 1024)
    def handle_response(self, key, response, resource):
        """
        Handle Blockwise in responses.

        :param key: key parameter to search inside the dictionary
        :param response: the response message
        :param resource: the resource whose payload is being transferred
        :return: the new response
        """
        block, byte, num, m, size = self._parent.blockwise[key]
        payload = resource.payload
        if block == 2:
            # Slice the next block out of the full payload.
            ret = payload[byte:byte + size]
            if len(ret) == size:
                # NOTE(review): a full-size slice sets m=1 even when it is
                # the last block (payload length an exact multiple of size),
                # announcing a follow-up block that does not exist -- confirm.
                m = 1
            else:
                m = 0
            response.block2 = (num, m, size)
            response.payload = ret
            byte += size
            num += 1
            if m == 0:
                # Transfer finished: drop the per-client state.
                del self._parent.blockwise[key]
            else:
                self._parent.blockwise[key] = (2, byte, num, m, size)
        elif block == 1:
            if m == 1:
                # More request blocks expected: ask the client to continue.
                response.code = defines.responses["CONTINUE"]
            response.block1 = (num, m, size)
        return response
|
normal
|
{
"blob_id": "70d740a7003ca3f2d2cde039b2fc470ef2165e77",
"index": 7078,
"step-1": "<mask token>\n\n\nclass BlockwiseLayer(object):\n <mask token>\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-2": "<mask token>\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-3": "<mask token>\n__author__ = 'Giacomo Tanganelli'\n__version__ = '2.0'\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-4": "import struct\nfrom coapthon import defines\nfrom coapthon.utils import byte_len, bit_len, parse_blockwise\n__author__ = 'Giacomo Tanganelli'\n__version__ = '2.0'\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-5": "import struct\nfrom coapthon import defines\nfrom coapthon.utils import byte_len, bit_len, parse_blockwise\n\n__author__ = 'Giacomo Tanganelli'\n__version__ = \"2.0\"\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options[\"Block2\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n elif option.number == defines.inv_options[\"Block1\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n self._parent.blockwise[key] = (1, 0, num, m, size)\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. 
Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = (2, 0, 0, 1, 1024)\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = (num, m, size)\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n\n elif block == 1:\n if m == 1:\n response.code = defines.responses[\"CONTINUE\"]\n response.block1 = (num, m, size)\n return response\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from mikeio.spatial import GeometryPoint2D, GeometryPoint3D
# https://www.ogc.org/standard/sfa/
def test_point2d_wkt():
p = GeometryPoint2D(10, 20)
assert p.wkt == "POINT (10 20)"
p = GeometryPoint2D(x=-5642.5, y=120.1)
assert p.wkt == "POINT (-5642.5 120.1)"
def test_point3d_wkt():
p = GeometryPoint3D(10, 20, 30)
assert p.wkt == "POINT Z (10 20 30)"
def test_point2d_to_shapely():
p = GeometryPoint2D(10, 20)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.wkt == p.wkt
def test_point3d_to_shapely():
p = GeometryPoint3D(10, 20, -1)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.z == -1
assert sp.wkt == p.wkt
|
normal
|
{
"blob_id": "ae45a4967a8ee63c27124d345ad4dc0c01033c0e",
"index": 6749,
"step-1": "<mask token>\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n",
"step-4": "from mikeio.spatial import GeometryPoint2D, GeometryPoint3D\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n",
"step-5": "from mikeio.spatial import GeometryPoint2D, GeometryPoint3D\n\n# https://www.ogc.org/standard/sfa/\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == \"POINT (10 20)\"\n\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == \"POINT (-5642.5 120.1)\"\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == \"POINT Z (10 20 30)\"\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
number = int(input("Enter a number, and I'll tell you if it's even or odd: "))
if number % 2 == 0:
print(f"{number} is an even number.")
else:
print(f"{number} is an odd number.")
|
normal
|
{
"blob_id": "b147a22d6bd12a954c0d85c11e578a67f0a51332",
"index": 3025,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif number % 2 == 0:\n print(f'{number} is an even number.')\nelse:\n print(f'{number} is an odd number.')\n",
"step-3": "number = int(input(\"Enter a number, and I'll tell you if it's even or odd: \"))\nif number % 2 == 0:\n print(f'{number} is an even number.')\nelse:\n print(f'{number} is an odd number.')\n",
"step-4": "number = int(input(\"Enter a number, and I'll tell you if it's even or odd: \"))\n\nif number % 2 == 0:\n print(f\"{number} is an even number.\")\nelse:\n print(f\"{number} is an odd number.\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
something1
x = session.query(x).filter(y).count()
something2
y = session.query(
models.User, models.X,
).filter(
models.User.time > start_time,
models.User.id == user_id,
).count()
def something3():
x = session.query(
models.Review,
).filter(
models.Review.time < end_time,
).count()
something4
x = session.query(x, y).filter(bla).count()
x = session.query(x.X, y).filter(y > user_id).count()
x = session.query(
x.X, y.Y
).filter(x.X == 5).count()
something5
|
normal
|
{
"blob_id": "5b91b7025b0e574d45f95a0585128018d83c17ea",
"index": 563,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\n<mask token>\n",
"step-3": "something1\n<mask token>\nsomething2\n<mask token>\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\nsomething4\n<mask token>\nsomething5\n",
"step-4": "something1\nx = session.query(x).filter(y).count()\nsomething2\ny = session.query(models.User, models.X).filter(models.User.time >\n start_time, models.User.id == user_id).count()\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\nsomething4\nx = session.query(x, y).filter(bla).count()\nx = session.query(x.X, y).filter(y > user_id).count()\nx = session.query(x.X, y.Y).filter(x.X == 5).count()\nsomething5\n",
"step-5": "something1\nx = session.query(x).filter(y).count()\nsomething2\ny = session.query(\n models.User, models.X,\n).filter(\n models.User.time > start_time,\n models.User.id == user_id,\n).count()\ndef something3():\n x = session.query(\n models.Review,\n ).filter(\n models.Review.time < end_time,\n ).count()\nsomething4\nx = session.query(x, y).filter(bla).count()\nx = session.query(x.X, y).filter(y > user_id).count()\nx = session.query(\n x.X, y.Y\n).filter(x.X == 5).count()\nsomething5\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from odoo import models, fields, api, _
class SaleAdvancePaymentInv(models.TransientModel):
_inherit = "sale.advance.payment.inv"
date_start_invoice_timesheet = fields.Date(
string='Start Date',
help="Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. "
"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will "
"be invoiced without distinction.", required=True)
date_end_invoice_timesheet = fields.Date(
string='End Date',
help="Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. "
"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will "
"be invoiced without distinction.", required=True)
|
normal
|
{
"blob_id": "75b1674066958a8fa28e74121a35d688bcc473d9",
"index": 9743,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n _inherit = 'sale.advance.payment.inv'\n date_start_invoice_timesheet = fields.Date(string='Start Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n date_end_invoice_timesheet = fields.Date(string='End Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n",
"step-4": "from odoo import models, fields, api, _\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n _inherit = 'sale.advance.payment.inv'\n date_start_invoice_timesheet = fields.Date(string='Start Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n date_end_invoice_timesheet = fields.Date(string='End Date', help=\n 'Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will be invoiced without distinction.'\n , required=True)\n",
"step-5": "from odoo import models, fields, api, _\n\n\nclass SaleAdvancePaymentInv(models.TransientModel):\n _inherit = \"sale.advance.payment.inv\"\n\n date_start_invoice_timesheet = fields.Date(\n string='Start Date',\n help=\"Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. \"\n \"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will \"\n \"be invoiced without distinction.\", required=True)\n date_end_invoice_timesheet = fields.Date(\n string='End Date',\n help=\"Only timesheets not yet invoiced (and validated, if applicable) from this period will be invoiced. \"\n \"If the period is not indicated, all timesheets not yet invoiced (and validated, if applicable) will \"\n \"be invoiced without distinction.\", required=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Created by Jake Hansen for Zebra interview take home assessment, July 2020.
import csv, os, sys, pickle
from datetime import date
#Class For storing information about each file generally. Helpful for future
#use cases to remember the indicies from a file, if file has thousands of fields
#Also can be used as a log to store daily number of 'good' vs 'bad' rows
class DataSource:
def __init__(self, name, usableRows, errorRows, indices):
self.name = name
self.usableRows = usableRows
self.errorRows = errorRows
self.indices = indices
# getHeaderIndexes(indices, headers)
# Requires: Pre-populated indices dictionary, the header's row from a CSV file with
# naming convention conforming to the schema output from the directions
# Effects: Determines if file has the necessary colums to match the desired output
# schema
# Modifies: The indices variable, returning the correct indices within the csv row
def getHeaderIndexes(indices, headers):
counter = -1
a,b,c,d,e,f,g = False, False, False, False,False,False,False
for header in headers:
counter += 1
if header.strip() == 'Provider Name':
a = True
indices['Provider Name'] = counter
elif header.strip() == 'CampaignID':
b = True
indices['CampaignID'] = counter
elif header.strip() == 'Cost Per Ad Click':
c = True
indices['Cost Per Ad Click'] = counter
elif header.strip() == 'Redirect Link':
d = True
indices['Redirect Link'] = counter
elif header.strip() == 'Phone Number':
e = True
indices['Phone Number'] = counter
elif header.strip() == 'Address':
f = True
indices['Address'] = counter
elif header.strip() == 'Zipcode':
g = True
indices['Zipcode'] = counter
if a == True and b == True and c == True and d == True and e == True and f == True and g == True:
valid = True
else:
valid = False
return indices, valid
# isRowValid(indices,row)
# Requires: a valid CSV file with columns necessary to match the expected output
# Effects: Determines if a single row should be added to the final output, or if
# the row is missing data / has incorrect data types for the field and thus
# will not be added to the output but instead printed out
# Modifies: N/A
def isRowValid(indices, row):
#String Non-Nullables
sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address', 'Zipcode']
for column in sNNs:
currentCheck = row[indices[column]].strip()
if isinstance(currentCheck, str) and len(currentCheck) > 0 and currentCheck != 'NULL':
pass
else:
return False
#Float Non Nullables
fNNs = ['Cost Per Ad Click']
for column in fNNs:
currentCheck = row[indices[column]].strip('"')
currentCheck = currentCheck.strip("'")
try:
float(currentCheck)
except:
return False
#String Nullables
sNs = ['Phone Number']
#No Check Required, because it can be nullable or a string. I do assume that
#it is required to have a "Phone Number" column, which is checked for in getHeaderIndexes
return True
# addUsableRow(indices, row, finalOutput)
# Requires: The row is known to follow the output schema as specificed in the requirements
# Effects: Adds row variables in the order specified in the output schema
# Modifies: the final output variable
def addUsableRow(indices, row, finalOutput):
pn = row[indices['Provider Name']].strip('"')
cid = row[indices['CampaignID']].strip('"')
cpac = row[indices['Cost Per Ad Click']].strip('"')
rl = row[indices['Redirect Link']].strip('"')
if row[indices['Phone Number']] == '':
phn = 'NULL'
else:
phn = row[indices['Phone Number']].strip('"')
ad = row[indices['Address']].strip('"')
zc = row[indices['Zipcode']].strip('"')
temp = '"'+ pn + '","' + cid + '","' + cpac + '","' + rl + '","' + phn + '","' + ad + '","' + zc + '"' + '\n'
finalOutput += temp
return finalOutput
# addErrorRow(indices, row, errorFinalOutput)
# Requires: The row does not follow the output schema
# Effects: adds the row to the error output variable that will be printed out
# Modifies: the error final output string which gets printed at the end of the daily
# job / procedure / script/ whatever The Zebra prefers to call these python data projects
def addErrorRow(indices, row, errorFinalOutput):
temp = 'Error: ' + '\n'
for thing in row:
temp += thing + ','
temp = temp[:-1]
temp += '\n'
errorFinalOutput += temp
return errorFinalOutput
#Variables and data structures
finalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\n'
errorFinalOutput = ''
# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
outputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
pickelFileName = 'pickle/' + str(date.today())
# pickelFileName = 'pickleTest/' + str(date.today())
pickleDict = {}
maxLines = 99999
dataSources = []
indices = {
"Provider Name": 0,
"CampaignID": 0,
"Cost Per Ad Click": 0,
"Redirect Link": 0,
"Phone Number": 0,
"Address": 0,
"Zipcode": 0
}
#InputFiles in list form
# inputList = [
# 'inputFilesTest/Auto.csv',
# 'inputFilesTest/Home.csv'
# ]
# InputFiles in a directory
inputDirectory = 'inputFiles'
#check if files are too large, or non-csv files
currentLines = 0
for file in os.listdir(inputDirectory):
# for file in inputList:
# currentLines += sum(1 for line in open(file))
currentLines += sum(1 for line in open(inputDirectory + '/' + file))
if currentLines > maxLines:
sys.exit('Error: Too many lines')
if file[-3:] != 'csv':
sys.exit('Error: Given file not a .csv file')
#Main Algorithm loop through all files in the list
for file in os.listdir(inputDirectory):
# for file in inputList:
#usableRows and errorRows used for storing information from each data source
usableRows = 0
errorRows = 0
# with open(file, newline='') as f:
with open(inputDirectory + '/' + file, newline='') as f:
reader = csv.reader(f)
try:
headers = next(reader)
except:
headers = ''
indicesCurrent, valid = getHeaderIndexes(indices, headers)
if valid == True:
for row in reader:
if isRowValid(indicesCurrent, row):
finalOutput = addUsableRow(indicesCurrent,row, finalOutput)
usableRows += 1
else:
errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
errorRows += 1
pickleDict[file] = indicesCurrent
else:
for row in reader:
errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
errorRows += 1
f.close()
#Add dataSource Information for possible future needs and logging purposes
newDataSource = DataSource(file,usableRows, errorRows, indices)
dataSources.append(newDataSource)
# Write the rows that matched the output schema.
# (The with-blocks below close their files; the old trailing f.close()
# calls after each block were redundant and have been removed.)
with open(outputFileName, 'w+') as f:
    f.write(finalOutput)

# Print the rows that failed validation so they show up in the job log.
print(errorFinalOutput)

# Pickle the per-run DataSource stats for daily logging.
with open(pickelFileName, 'wb') as f:
    pickle.dump(dataSources, f)

# Pickle the dictionary mapping filename -> header indices.
with open('pickle/masterDict', 'wb') as f:
    pickle.dump(pickleDict, f)

# Thank you line
print("Thanks for taking the time to look at my code and consider me for this position. Cheers!")
|
normal
|
{
"blob_id": "38c1b82a29a5ad0b4581e63fb083ca2487a79817",
"index": 9544,
"step-1": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\n<mask token>\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return 
errorFinalOutput\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn 
= 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn 
= 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\nfinalOutput = (\n 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'\n + '\\n')\nerrorFinalOutput = ''\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()\n ) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,\n 'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}\ninputDirectory = 'inputFiles'\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\nfor file in os.listdir(inputDirectory):\n usableRows = 0\n errorRows = 0\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent, row, finalOutput\n )\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n 
errorRows += 1\n f.close()\n newDataSource = DataSource(file, usableRows, errorRows, indices)\n dataSources.append(newDataSource)\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\nprint(errorFinalOutput)\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\nprint(\n 'Thanks for taking the time to look at my code and consider me for this position. Cheers!'\n )\n",
"step-4": "import csv, os, sys, pickle\nfrom datetime import date\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if 
row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\nfinalOutput = (\n 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'\n + '\\n')\nerrorFinalOutput = ''\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()\n ) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,\n 'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}\ninputDirectory = 'inputFiles'\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\nfor file in os.listdir(inputDirectory):\n usableRows = 0\n errorRows = 0\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent, row, finalOutput\n )\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n else:\n for row in reader:\n errorFinalOutput = 
addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n f.close()\n newDataSource = DataSource(file, usableRows, errorRows, indices)\n dataSources.append(newDataSource)\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\nprint(errorFinalOutput)\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\nprint(\n 'Thanks for taking the time to look at my code and consider me for this position. Cheers!'\n )\n",
"step-5": "#Created by Jake Hansen for Zebra interview take home assessment, July 2020.\nimport csv, os, sys, pickle\nfrom datetime import date\n\n#Class For storing information about each file generally. Helpful for future\n#use cases to remember the indicies from a file, if file has thousands of fields\n#Also can be used as a log to store daily number of 'good' vs 'bad' rows\nclass DataSource:\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n# getHeaderIndexes(indices, headers)\n# Requires: Pre-populated indices dictionary, the header's row from a CSV file with\n# naming convention conforming to the schema output from the directions\n# Effects: Determines if file has the necessary colums to match the desired output\n# schema\n# Modifies: The indices variable, returning the correct indices within the csv row\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a,b,c,d,e,f,g = False, False, False, False,False,False,False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if a == True and b == True and c == True and d == True and e == True and f == True and g == True:\n valid = True\n else:\n valid = False\n return indices, valid\n\n# isRowValid(indices,row)\n# Requires: a valid CSV file with columns necessary to match the expected output\n# Effects: Determines 
if a single row should be added to the final output, or if\n# the row is missing data / has incorrect data types for the field and thus\n# will not be added to the output but instead printed out\n# Modifies: N/A\ndef isRowValid(indices, row):\n #String Non-Nullables\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address', 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n\n #Float Non Nullables\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n\n #String Nullables\n sNs = ['Phone Number']\n #No Check Required, because it can be nullable or a string. I do assume that\n #it is required to have a \"Phone Number\" column, which is checked for in getHeaderIndexes\n\n return True\n\n# addUsableRow(indices, row, finalOutput)\n# Requires: The row is known to follow the output schema as specificed in the requirements\n# Effects: Adds row variables in the order specified in the output schema\n# Modifies: the final output variable\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n\n temp = '\"'+ pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' + phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n'\n finalOutput += temp\n return finalOutput\n\n# addErrorRow(indices, row, errorFinalOutput)\n# Requires: The row does not follow the output schema\n# Effects: adds the row to the 
error output variable that will be printed out\n# Modifies: the error final output string which gets printed at the end of the daily\n# job / procedure / script/ whatever The Zebra prefers to call these python data projects\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n#Variables and data structures\nfinalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\\n'\nerrorFinalOutput = ''\n# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\n# pickelFileName = 'pickleTest/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {\n \"Provider Name\": 0,\n \"CampaignID\": 0,\n \"Cost Per Ad Click\": 0,\n \"Redirect Link\": 0,\n \"Phone Number\": 0,\n \"Address\": 0,\n \"Zipcode\": 0\n}\n\n#InputFiles in list form\n# inputList = [\n# 'inputFilesTest/Auto.csv',\n# 'inputFilesTest/Home.csv'\n# ]\n\n# InputFiles in a directory\ninputDirectory = 'inputFiles'\n\n#check if files are too large, or non-csv files\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n# for file in inputList:\n # currentLines += sum(1 for line in open(file))\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\n\n#Main Algorithm loop through all files in the list\nfor file in os.listdir(inputDirectory):\n# for file in inputList:\n #usableRows and errorRows used for storing information from each data source\n usableRows = 0\n errorRows = 0\n # with open(file, newline='') as f:\n with open(inputDirectory + '/' + file, newline='') as 
f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent,row, finalOutput)\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)\n errorRows += 1\n\n f.close()\n #Add dataSource Information for possible future needs and logging purposes\n newDataSource = DataSource(file,usableRows, errorRows, indices)\n dataSources.append(newDataSource)\n\n#Create file with rows containing correct schema\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\n\n#print the incorrect rows\nprint(errorFinalOutput)\n\n#Create Pickel file containing data source info for daily logging\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\n\n#Create Pickle File dictionary with indices specific info for filenames\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\n\n#Thank you line\nprint(\"Thanks for taking the time to look at my code and consider me for this position. Cheers!\")\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
from unittest import TestCase
# auto-test toggled test class to monitor changes to is_palindrome function
class Test_is_palindrome(TestCase):
    """Regression tests for identify_a_palindrome.is_palindrome.

    The module under test is imported inside each test method so that an
    auto-test runner picks up edits to it between runs.
    """

    def test_is_palindrome(self):
        from identify_a_palindrome import is_palindrome
        for candidate in ("Asdfdsa", "asDf'ssfdsa"):
            self.assertTrue(is_palindrome(candidate))

    def test_is_palindrome_with_non_alpha(self):
        from identify_a_palindrome import is_palindrome
        self.assertTrue(is_palindrome("asdf'ssfdsa"))

    def test_is_not_palindrome(self):
        from identify_a_palindrome import is_palindrome
        for candidate in ("asdfddsa", "hello world"):
            self.assertFalse(is_palindrome(candidate))
|
normal
|
{
"blob_id": "785b54dce76d6906df513a8bde0110ab6fd63357",
"index": 7083,
"step-1": "<mask token>\n\n\nclass Test_is_palindrome(TestCase):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_is_palindrome(TestCase):\n\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome('Asdfdsa'))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_is_palindrome(TestCase):\n\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome('Asdfdsa'))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n\n def test_is_not_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertFalse(is_palindrome('asdfddsa'))\n self.assertFalse(is_palindrome('hello world'))\n",
"step-4": "from unittest import TestCase\n\n\nclass Test_is_palindrome(TestCase):\n\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome('Asdfdsa'))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n\n def test_is_not_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertFalse(is_palindrome('asdfddsa'))\n self.assertFalse(is_palindrome('hello world'))\n",
"step-5": "from unittest import TestCase\n\n# auto-test toggled test class to monitor changes to is_palindrome function\nclass Test_is_palindrome(TestCase):\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"Asdfdsa\"))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n\n def test_is_not_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertFalse(is_palindrome(\"asdfddsa\"))\n self.assertFalse(is_palindrome(\"hello world\"))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
"""
Copyright (C) 2014, Jill Huchital
"""
# test comment
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from flask import abort

from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
# Module-global database handle; populated in the __main__ block below.
# NOTE(review): route handlers read this global — it stays None when the
# module is imported rather than run directly. Confirm that is intended.
ALL_DBS = None

app = Flask(__name__)
@app.route('/')
def index():
    """Root endpoint; returns a plain placeholder string."""
    return "index"
@app.route('/hello/')
def hello():
    """Render the index template with a fixed greeting."""
    message = 'here we are'
    return render_template('index.html', greeting=message)
@app.route('/tools/')
def tools():
    """Render the internal tools page."""
    template = 'tools.html'
    return render_template(template)
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
    """Create the playlists, then respond with the full playlist set."""
    create_playlists(ALL_DBS)
    return jsonify({'all_playlists': get_all_playlists(ALL_DBS)})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
    """Return every playlist as JSON."""
    return jsonify({'all_playlists': get_all_playlists(ALL_DBS)})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
    """Return every category as JSON."""
    return jsonify({'all_categories': get_all_categories(ALL_DBS)})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
    """Return every topic as JSON."""
    return jsonify({'all_topics': get_all_topics(ALL_DBS)})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
    """Add a new category from the POSTed JSON body."""
    # NOTE(review): unlike add_topic, this returns the helper's value directly
    # rather than wrapping it in jsonify — confirm the asymmetry is intentional.
    return add_new_category(request.json, ALL_DBS)
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
    """Add a new topic from the POSTed JSON body; report the helper's code."""
    code = add_new_topic(request.json, ALL_DBS)
    return jsonify({'return_code': code})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
    """Echo endpoint: reflects the call name and two optional JSON params.

    Responds 400 when the request carries no JSON body.
    """
    if not request.json:
        # `abort` comes from flask (now imported at the top of the file);
        # previously it was never imported, so this branch raised NameError
        # instead of returning a 400.
        abort(400)
    param1 = request.json.get('param1', 'no param 1')
    param2 = request.json.get('param2', 'no param 2')
    retval = {'param_1': param1,
              'api_call': api_call,
              'param_2': param2}
    return jsonify(retval)
# Entry point: connect to the databases, then start Flask's development server.
if __name__ == '__main__':
    # debug = True makes the server restart when the Python files change. TODO: make it
    # depend on whether we're running locally or in production.
    ALL_DBS = connect_to_db()
    # create_playlists(ALL_DBS)
    app.run(debug = True)
|
normal
|
{
"blob_id": "5193de15052f81460a23d993cfa039fa90c9de5e",
"index": 897,
"step-1": "<mask token>\n\n\[email protected]('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n<mask token>\n\n\[email protected]('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n<mask token>\n\n\[email protected]('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n<mask token>\n\n\[email protected]('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\[email protected]('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return 'index'\n\n\[email protected]('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n<mask token>\n\n\[email protected]('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\[email protected]('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\[email protected]('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\[email protected]('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\[email protected]('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef index():\n return 'index'\n\n\[email protected]('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\[email protected]('/tools/')\ndef tools():\n return render_template('tools.html')\n\n\[email protected]('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\[email protected]('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\[email protected]('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\[email protected]('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\[email protected]('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\nif __name__ == '__main__':\n ALL_DBS = connect_to_db()\n app.run(debug=True)\n",
"step-4": "<mask token>\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nfrom playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics\nfrom db import connect_to_db\nALL_DBS = None\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return 'index'\n\n\[email protected]('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\[email protected]('/tools/')\ndef tools():\n return render_template('tools.html')\n\n\[email protected]('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\[email protected]('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\[email protected]('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\[email protected]('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\[email protected]('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\[email protected]('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\nif __name__ == 
'__main__':\n ALL_DBS = connect_to_db()\n app.run(debug=True)\n",
"step-5": "\"\"\"\nCopyright (C) 2014, Jill Huchital\n\"\"\"\n\n# test comment\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\n\nfrom playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics\nfrom db import connect_to_db\n\nALL_DBS = None\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n # return render_template('index.html', greeting='here we are then')\n return \"index\"\n\[email protected]('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\[email protected]('/tools/')\ndef tools():\n return render_template('tools.html')\n\[email protected]('/api/1.0/create_playlists', methods = ['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\[email protected]('/api/1.0/get_playlists', methods = ['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\[email protected]('/api/1.0/get_all_categories', methods = ['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\[email protected]('/api/1.0/get_all_topics', methods = ['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\[email protected]('/api/1.0/add_category', methods = ['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\[email protected]('/api/1.0/add_topic', methods = ['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\[email protected]('/api/1.0/<string:api_call>', methods = ['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 
2')\n retval = {'param_1': param1,\n 'api_call': api_call,\n 'param_2': param2}\n return jsonify(retval)\n\nif __name__ == '__main__':\n # debug = True makes the server restart when the Python files change. TODO: make it\n # depend on whether we're running locally or in production.\n ALL_DBS = connect_to_db()\n # create_playlists(ALL_DBS)\n app.run(debug = True)\n",
"step-ids": [
6,
9,
11,
13,
14
]
}
|
[
6,
9,
11,
13,
14
] |
from typing import Optional,List
from fastapi import FastAPI
from pydantic import BaseModel, Field
from redisqueue import RedisQueue,MyRedis
import random
class Award(BaseModel):
    """A single prize tier in a lottery: its name and available quantity."""
    name: str  # display name of the prize
    count: int  # number of units of this prize placed into the pool
class Item(BaseModel):
    """Request body for creating a lottery rule (see the /creat endpoint)."""
    # BUG FIX: the original spelled the keyword "max_lenght"; pydantic
    # silently ignores unknown Field kwargs, so the 300-char limit was
    # never actually enforced.
    luckname: str = Field(..., title="抽奖规则名称", max_length=300)
    total: int = Field(..., title="抽奖总人数", gt=0)  # total number of entrants; must be positive
    award: Optional[List[Award]] = Field(None, title="奖品列表")  # optional prize tiers
    other: str = Field(..., title="参与奖或者未中奖")  # consolation / "no win" label
app = FastAPI()
class ResSuccess(BaseModel):
    """Generic success envelope: ret == 0 plus an optional data payload."""
    ret: int = 0  # 0 means success throughout this API
    # BUG FIX: the original body ended with the bare name `data`, which
    # raises NameError the moment the module is imported. Modeled here as
    # an optional payload field; no visible caller pins its exact type —
    # TODO confirm the intended payload shape.
    data: Optional[dict] = None
@app.get('/')
def read_root():
    """Root endpoint; returns a static greeting (handy as a liveness probe)."""
    greeting = {"Hello": "World"}
    return greeting
@app.post(
    '/delect',
    tags = ["抽奖接口"],
    summary = "删除抽奖规则"
    )
def delect(name: str):
    """Delete the lottery rule (the Redis queue) with the given name.

    Always reports success, whether or not the queue existed.
    """
    rq = RedisQueue(name)
    # BUG FIX: the original tested `rq.qsize` — the bound method object,
    # which is always truthy — instead of calling it. Compare creat(),
    # which correctly calls rq.qsize().
    if rq.qsize():
        rq.lpop(name)
    return {
        'ret': 0,
        'msg': "删除成功"
    }
@app.post(
    '/creat',
    tags = ['抽奖接口'],
    summary="创建抽奖规则"
)
def creat(item: Item):
    """Create a lottery rule.

    Builds a shuffled draw queue (one entry per entrant) plus a Redis hash
    mapping each prize name to its remaining count. Refuses to overwrite an
    existing rule with the same name.
    """
    myredis = MyRedis()
    rq = RedisQueue(item.luckname)
    print("ok")
    # A non-empty queue under this name means the rule already exists.
    if rq.qsize():
        return {
            "ret": 500,
            "msg": "该抽奖已经存在,请删除后重试"
        }
    # BUG FIX: item.award is Optional; iterating None raised TypeError.
    awardlist = item.award or []
    lucklist = []
    luckdict = {}
    for ward in awardlist:
        luckdict[ward.name] = ward.count
        for i in range(ward.count):
            lucklist.append(ward.name)
    # Pad the pool with the consolation entry up to the entrant total.
    # BUG FIX: guard with `> 0` — the original `if othercount:` also fired
    # when prizes exceeded the total, storing a negative consolation count.
    othercount = item.total - len(lucklist)
    if othercount > 0:
        luckdict[item.other] = othercount
        others = [item.other] * othercount
        lucklist = lucklist + others
    random.shuffle(lucklist)
    print(lucklist)
    for luck in lucklist:
        rq.put(luck)
    # Remaining-count hash, keyed by the rule name.
    myredis.hmset(item.luckname, luckdict)
    result = {
        'ret': 0,
        'msg': "succses"  # NOTE(review): typo preserved — clients may match on this exact string
    }
    return result
@app.get('/luck', tags = ["抽奖接口"], summary="抽奖接口")
def luck(id: int,luckname: str):
    """Draw once from the named lottery on behalf of participant *id*.

    A participant can win at most once; repeat calls are rejected.
    """
    queue = RedisQueue(luckname)
    store = MyRedis()
    winners_key = luckname + "_winner"
    # One draw per participant: bail out if this id already has a record.
    if store.hexists(winners_key, id):
        return {
            "ret": 0,
            "msg": "您已经抽过了,不能再抽了"
        }
    prize = queue.get_nowait()
    if not prize:
        # Queue exhausted — nothing left to hand out.
        return {
            "ret": 0,
            'data': {
                "flag": 0,
                "msg": "奖抽完了",
            }
        }
    # Record the win and decrement the remaining count for this prize.
    store.hset(winners_key, id, prize)
    store.hincrby(luckname, prize, -1)
    return {
        "ret": 0,
        'data': {
            "flag": 1,
            "msg": "恭喜你中奖了",
            "award": prize
        }
    }
@app.get('/luckman',tags = ["抽奖接口"],summary="查看中奖名单")
def luckman(luckname: str):
    """Return the winner map (participant id -> prize) for a lottery."""
    store = MyRedis()
    winners = store.hgetall(luckname + "_winner")
    print(winners)
    return {
        "ret": 0,
        "data": winners
    }
@app.get('/remaining',tags = ["抽奖接口"],summary="查看剩余奖品列表")
def Remaining(luckname: str):
    """Return the remaining prize counts (prize name -> count) for a lottery."""
    store = MyRedis()
    remaining = store.hgetall(luckname)
    print(remaining)
    return {
        "ret": 0,
        "data": remaining
    }
|
normal
|
{
"blob_id": "4550ed971eef36badf46a44adcc593324a5292cf",
"index": 2637,
"step-1": "<mask token>\n\n\nclass Award(BaseModel):\n name: str\n count: int\n\n\nclass Item(BaseModel):\n luckname: str = Field(..., title='抽奖规则名称', max_lenght=300)\n total: int = Field(..., title='抽奖总人数', gt=0)\n award: Optional[List[Award]] = Field(None, title='奖品列表')\n other: str = Field(..., title='参与奖或者未中奖')\n\n\n<mask token>\n\n\nclass ResSuccess(BaseModel):\n ret: int = 0\n data\n\n\n<mask token>\n\n\[email protected]('/delect', tags=['抽奖接口'], summary='删除抽奖规则')\ndef delect(name: str):\n rq = RedisQueue(name)\n if rq.qsize:\n rq.lpop(name)\n return {'ret': 0, 'msg': '删除成功'}\n\n\[email protected]('/creat', tags=['抽奖接口'], summary='创建抽奖规则')\ndef creat(item: Item):\n \"\"\"\n 通过该接口可以创建一个抽奖规则\n \"\"\"\n myredis = MyRedis()\n rq = RedisQueue(item.luckname)\n print('ok')\n if rq.qsize():\n return {'ret': 500, 'msg': '该抽奖已经存在,请删除后重试'}\n result = {'ret': 0, 'item': item}\n awardlist = item.award\n lucklist = []\n luckdict = {}\n for ward in awardlist:\n luckdict[ward.name] = ward.count\n for i in range(ward.count):\n lucklist.append(ward.name)\n othercount = item.total - len(lucklist)\n if othercount:\n luckdict[item.other] = othercount\n others = [item.other] * othercount\n lucklist = lucklist + others\n random.shuffle(lucklist)\n print(lucklist)\n for luck in lucklist:\n rq.put(luck)\n myredis.hmset(item.luckname, luckdict)\n result = {'ret': 0, 'msg': 'succses'}\n return result\n\n\[email protected]('/luck', tags=['抽奖接口'], summary='抽奖接口')\ndef luck(id: int, luckname: str):\n \"\"\"\n 开始抽奖\n \"\"\"\n rd = RedisQueue(luckname)\n myredis = MyRedis()\n winner = luckname + '_winner'\n if myredis.hexists(winner, id):\n return {'ret': 0, 'msg': '您已经抽过了,不能再抽了'}\n award = rd.get_nowait()\n if award:\n myredis.hset(winner, id, award)\n myredis.hincrby(luckname, award, -1)\n result = {'ret': 0, 'data': {'flag': 1, 'msg': '恭喜你中奖了', 'award':\n award}}\n else:\n result = {'ret': 0, 'data': {'flag': 0, 'msg': '奖抽完了'}}\n return result\n\n\n<mask token>\n\n\[email 
protected]('/remaining', tags=['抽奖接口'], summary='查看剩余奖品列表')\ndef Remaining(luckname: str):\n myredis = MyRedis()\n remainlist = myredis.hgetall(luckname)\n print(remainlist)\n return {'ret': 0, 'data': remainlist}\n",
"step-2": "<mask token>\n\n\nclass Award(BaseModel):\n name: str\n count: int\n\n\nclass Item(BaseModel):\n luckname: str = Field(..., title='抽奖规则名称', max_lenght=300)\n total: int = Field(..., title='抽奖总人数', gt=0)\n award: Optional[List[Award]] = Field(None, title='奖品列表')\n other: str = Field(..., title='参与奖或者未中奖')\n\n\n<mask token>\n\n\nclass ResSuccess(BaseModel):\n ret: int = 0\n data\n\n\n<mask token>\n\n\[email protected]('/delect', tags=['抽奖接口'], summary='删除抽奖规则')\ndef delect(name: str):\n rq = RedisQueue(name)\n if rq.qsize:\n rq.lpop(name)\n return {'ret': 0, 'msg': '删除成功'}\n\n\[email protected]('/creat', tags=['抽奖接口'], summary='创建抽奖规则')\ndef creat(item: Item):\n \"\"\"\n 通过该接口可以创建一个抽奖规则\n \"\"\"\n myredis = MyRedis()\n rq = RedisQueue(item.luckname)\n print('ok')\n if rq.qsize():\n return {'ret': 500, 'msg': '该抽奖已经存在,请删除后重试'}\n result = {'ret': 0, 'item': item}\n awardlist = item.award\n lucklist = []\n luckdict = {}\n for ward in awardlist:\n luckdict[ward.name] = ward.count\n for i in range(ward.count):\n lucklist.append(ward.name)\n othercount = item.total - len(lucklist)\n if othercount:\n luckdict[item.other] = othercount\n others = [item.other] * othercount\n lucklist = lucklist + others\n random.shuffle(lucklist)\n print(lucklist)\n for luck in lucklist:\n rq.put(luck)\n myredis.hmset(item.luckname, luckdict)\n result = {'ret': 0, 'msg': 'succses'}\n return result\n\n\[email protected]('/luck', tags=['抽奖接口'], summary='抽奖接口')\ndef luck(id: int, luckname: str):\n \"\"\"\n 开始抽奖\n \"\"\"\n rd = RedisQueue(luckname)\n myredis = MyRedis()\n winner = luckname + '_winner'\n if myredis.hexists(winner, id):\n return {'ret': 0, 'msg': '您已经抽过了,不能再抽了'}\n award = rd.get_nowait()\n if award:\n myredis.hset(winner, id, award)\n myredis.hincrby(luckname, award, -1)\n result = {'ret': 0, 'data': {'flag': 1, 'msg': '恭喜你中奖了', 'award':\n award}}\n else:\n result = {'ret': 0, 'data': {'flag': 0, 'msg': '奖抽完了'}}\n return result\n\n\[email protected]('/luckman', 
tags=['抽奖接口'], summary='查看中奖名单')\ndef luckman(luckname: str):\n myredis = MyRedis()\n winner = luckname + '_winner'\n winnerlist = myredis.hgetall(winner)\n print(winnerlist)\n return {'ret': 0, 'data': winnerlist}\n\n\[email protected]('/remaining', tags=['抽奖接口'], summary='查看剩余奖品列表')\ndef Remaining(luckname: str):\n myredis = MyRedis()\n remainlist = myredis.hgetall(luckname)\n print(remainlist)\n return {'ret': 0, 'data': remainlist}\n",
"step-3": "<mask token>\n\n\nclass Award(BaseModel):\n name: str\n count: int\n\n\nclass Item(BaseModel):\n luckname: str = Field(..., title='抽奖规则名称', max_lenght=300)\n total: int = Field(..., title='抽奖总人数', gt=0)\n award: Optional[List[Award]] = Field(None, title='奖品列表')\n other: str = Field(..., title='参与奖或者未中奖')\n\n\napp = FastAPI()\n\n\nclass ResSuccess(BaseModel):\n ret: int = 0\n data\n\n\[email protected]('/')\ndef read_root():\n return {'Hello': 'World'}\n\n\[email protected]('/delect', tags=['抽奖接口'], summary='删除抽奖规则')\ndef delect(name: str):\n rq = RedisQueue(name)\n if rq.qsize:\n rq.lpop(name)\n return {'ret': 0, 'msg': '删除成功'}\n\n\[email protected]('/creat', tags=['抽奖接口'], summary='创建抽奖规则')\ndef creat(item: Item):\n \"\"\"\n 通过该接口可以创建一个抽奖规则\n \"\"\"\n myredis = MyRedis()\n rq = RedisQueue(item.luckname)\n print('ok')\n if rq.qsize():\n return {'ret': 500, 'msg': '该抽奖已经存在,请删除后重试'}\n result = {'ret': 0, 'item': item}\n awardlist = item.award\n lucklist = []\n luckdict = {}\n for ward in awardlist:\n luckdict[ward.name] = ward.count\n for i in range(ward.count):\n lucklist.append(ward.name)\n othercount = item.total - len(lucklist)\n if othercount:\n luckdict[item.other] = othercount\n others = [item.other] * othercount\n lucklist = lucklist + others\n random.shuffle(lucklist)\n print(lucklist)\n for luck in lucklist:\n rq.put(luck)\n myredis.hmset(item.luckname, luckdict)\n result = {'ret': 0, 'msg': 'succses'}\n return result\n\n\[email protected]('/luck', tags=['抽奖接口'], summary='抽奖接口')\ndef luck(id: int, luckname: str):\n \"\"\"\n 开始抽奖\n \"\"\"\n rd = RedisQueue(luckname)\n myredis = MyRedis()\n winner = luckname + '_winner'\n if myredis.hexists(winner, id):\n return {'ret': 0, 'msg': '您已经抽过了,不能再抽了'}\n award = rd.get_nowait()\n if award:\n myredis.hset(winner, id, award)\n myredis.hincrby(luckname, award, -1)\n result = {'ret': 0, 'data': {'flag': 1, 'msg': '恭喜你中奖了', 'award':\n award}}\n else:\n result = {'ret': 0, 'data': {'flag': 0, 'msg': '奖抽完了'}}\n 
return result\n\n\[email protected]('/luckman', tags=['抽奖接口'], summary='查看中奖名单')\ndef luckman(luckname: str):\n myredis = MyRedis()\n winner = luckname + '_winner'\n winnerlist = myredis.hgetall(winner)\n print(winnerlist)\n return {'ret': 0, 'data': winnerlist}\n\n\[email protected]('/remaining', tags=['抽奖接口'], summary='查看剩余奖品列表')\ndef Remaining(luckname: str):\n myredis = MyRedis()\n remainlist = myredis.hgetall(luckname)\n print(remainlist)\n return {'ret': 0, 'data': remainlist}\n",
"step-4": "from typing import Optional, List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, Field\nfrom redisqueue import RedisQueue, MyRedis\nimport random\n\n\nclass Award(BaseModel):\n name: str\n count: int\n\n\nclass Item(BaseModel):\n luckname: str = Field(..., title='抽奖规则名称', max_lenght=300)\n total: int = Field(..., title='抽奖总人数', gt=0)\n award: Optional[List[Award]] = Field(None, title='奖品列表')\n other: str = Field(..., title='参与奖或者未中奖')\n\n\napp = FastAPI()\n\n\nclass ResSuccess(BaseModel):\n ret: int = 0\n data\n\n\[email protected]('/')\ndef read_root():\n return {'Hello': 'World'}\n\n\[email protected]('/delect', tags=['抽奖接口'], summary='删除抽奖规则')\ndef delect(name: str):\n rq = RedisQueue(name)\n if rq.qsize:\n rq.lpop(name)\n return {'ret': 0, 'msg': '删除成功'}\n\n\[email protected]('/creat', tags=['抽奖接口'], summary='创建抽奖规则')\ndef creat(item: Item):\n \"\"\"\n 通过该接口可以创建一个抽奖规则\n \"\"\"\n myredis = MyRedis()\n rq = RedisQueue(item.luckname)\n print('ok')\n if rq.qsize():\n return {'ret': 500, 'msg': '该抽奖已经存在,请删除后重试'}\n result = {'ret': 0, 'item': item}\n awardlist = item.award\n lucklist = []\n luckdict = {}\n for ward in awardlist:\n luckdict[ward.name] = ward.count\n for i in range(ward.count):\n lucklist.append(ward.name)\n othercount = item.total - len(lucklist)\n if othercount:\n luckdict[item.other] = othercount\n others = [item.other] * othercount\n lucklist = lucklist + others\n random.shuffle(lucklist)\n print(lucklist)\n for luck in lucklist:\n rq.put(luck)\n myredis.hmset(item.luckname, luckdict)\n result = {'ret': 0, 'msg': 'succses'}\n return result\n\n\[email protected]('/luck', tags=['抽奖接口'], summary='抽奖接口')\ndef luck(id: int, luckname: str):\n \"\"\"\n 开始抽奖\n \"\"\"\n rd = RedisQueue(luckname)\n myredis = MyRedis()\n winner = luckname + '_winner'\n if myredis.hexists(winner, id):\n return {'ret': 0, 'msg': '您已经抽过了,不能再抽了'}\n award = rd.get_nowait()\n if award:\n myredis.hset(winner, id, award)\n myredis.hincrby(luckname, award, 
-1)\n result = {'ret': 0, 'data': {'flag': 1, 'msg': '恭喜你中奖了', 'award':\n award}}\n else:\n result = {'ret': 0, 'data': {'flag': 0, 'msg': '奖抽完了'}}\n return result\n\n\[email protected]('/luckman', tags=['抽奖接口'], summary='查看中奖名单')\ndef luckman(luckname: str):\n myredis = MyRedis()\n winner = luckname + '_winner'\n winnerlist = myredis.hgetall(winner)\n print(winnerlist)\n return {'ret': 0, 'data': winnerlist}\n\n\[email protected]('/remaining', tags=['抽奖接口'], summary='查看剩余奖品列表')\ndef Remaining(luckname: str):\n myredis = MyRedis()\n remainlist = myredis.hgetall(luckname)\n print(remainlist)\n return {'ret': 0, 'data': remainlist}\n",
"step-5": "from typing import Optional,List\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, Field\n\nfrom redisqueue import RedisQueue,MyRedis\nimport random\n\nclass Award(BaseModel):\n name: str\n count: int\n\nclass Item(BaseModel):\n luckname: str = Field(...,title=\"抽奖规则名称\",max_lenght = 300)\n total: int = Field(...,title=\"抽奖总人数\",gt=0)\n award: Optional[List[Award]] = Field(None,title=\"奖品列表\")\n other: str = Field(...,title=\"参与奖或者未中奖\")\napp = FastAPI()\n\nclass ResSuccess(BaseModel):\n ret: int = 0\n data\n\[email protected]('/')\ndef read_root():\n return {\"Hello\":\"World\"}\n\[email protected](\n '/delect', \n tags = [\"抽奖接口\"],\n summary = \"删除抽奖规则\"\n )\ndef delect(name:str):\n rq = RedisQueue(name)\n if rq.qsize:\n rq.lpop(name)\n return {\n 'ret':0,\n 'msg':\"删除成功\"\n }\n\[email protected](\n '/creat',\n tags = ['抽奖接口'],\n summary=\"创建抽奖规则\"\n)\ndef creat(item: Item):\n \"\"\"\n 通过该接口可以创建一个抽奖规则\n \"\"\"\n myredis = MyRedis()\n rq = RedisQueue(item.luckname)\n print(\"ok\")\n if rq.qsize():\n return {\n \"ret\":500,\n \"msg\":\"该抽奖已经存在,请删除后重试\"\n }\n result = {\"ret\":0, \"item\":item}\n awardlist = item.award\n lucklist =[]\n luckdict = {}\n for ward in awardlist:\n luckdict[ward.name] = ward.count\n for i in range(ward.count):\n lucklist.append(ward.name)\n othercount = item.total - len(lucklist)\n\n if othercount:\n luckdict[item.other] = othercount\n others = [item.other] * othercount\n \n lucklist = lucklist + others\n random.shuffle(lucklist)\n print(lucklist)\n for luck in lucklist:\n rq.put(luck)\n \n myredis.hmset(item.luckname,luckdict)\n\n result = {\n 'ret': 0,\n 'msg': \"succses\"\n }\n return result\n\[email protected]('/luck', tags = [\"抽奖接口\"], summary=\"抽奖接口\")\ndef luck(id: int,luckname: str):\n \"\"\"\n 开始抽奖\n \"\"\"\n rd = RedisQueue(luckname)\n myredis = MyRedis()\n winner = luckname+\"_winner\"\n if myredis.hexists(winner,id):\n return {\n \"ret\":0,\n \"msg\":\"您已经抽过了,不能再抽了\"\n }\n award = rd.get_nowait()\n 
if award:\n myredis.hset(winner,id,award)\n myredis.hincrby(luckname,award,-1)\n \n result = {\n \"ret\":0,\n 'data':{\n \"flag\":1,\n \"msg\":\"恭喜你中奖了\",\n \"award\":award\n }\n }\n else:\n result = {\n \"ret\":0,\n 'data':{\n \"flag\":0,\n \"msg\":\"奖抽完了\",\n }\n }\n \n return result\n\[email protected]('/luckman',tags = [\"抽奖接口\"],summary=\"查看中奖名单\")\ndef luckman(luckname: str):\n myredis = MyRedis()\n winner = luckname + \"_winner\"\n winnerlist = myredis.hgetall(winner)\n print(winnerlist)\n return {\n \"ret\":0,\n \"data\":winnerlist\n }\n\[email protected]('/remaining',tags = [\"抽奖接口\"],summary=\"查看剩余奖品列表\")\ndef Remaining(luckname: str):\n myredis = MyRedis()\n remainlist = myredis.hgetall(luckname)\n print(remainlist)\n return {\n \"ret\":0,\n \"data\":remainlist\n }\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
from unittest import TestCase
from angelos.document.statements import Statement, Verified, Trusted, Revoked
class TestStatement(TestCase):
    """Unit tests for the base Statement document type."""

    def setUp(self):
        # Build a fresh document for every test method.
        self.instance = Statement()

    def tearDown(self):
        # Release the document created by setUp.
        del self.instance

    def test_apply_rules(self):
        # A default-constructed document must satisfy its own rules.
        outcome = self.instance.apply_rules()
        self.assertTrue(outcome)
class TestVerified(TestCase):
    """Unit tests for the Verified statement document."""

    def setUp(self):
        # Build a fresh document for every test method.
        self.instance = Verified()

    def tearDown(self):
        # Release the document created by setUp.
        del self.instance

    def test_apply_rules(self):
        # A default-constructed document must satisfy its own rules.
        outcome = self.instance.apply_rules()
        self.assertTrue(outcome)
class TestTrusted(TestCase):
    """Unit tests for the Trusted statement document."""

    def setUp(self):
        # Build a fresh document for every test method.
        self.instance = Trusted()

    def tearDown(self):
        # Release the document created by setUp.
        del self.instance

    def test_apply_rules(self):
        # A default-constructed document must satisfy its own rules.
        outcome = self.instance.apply_rules()
        self.assertTrue(outcome)
class TestRevoked(TestCase):
    """Unit tests for the Revoked statement document."""

    def setUp(self):
        # Build a fresh document for every test method.
        self.instance = Revoked()

    def tearDown(self):
        # Release the document created by setUp.
        del self.instance

    def test_apply_rules(self):
        # A default-constructed document must satisfy its own rules.
        outcome = self.instance.apply_rules()
        self.assertTrue(outcome)
|
normal
|
{
"blob_id": "f494dc99febfad99b371d72f542556a9024bc27d",
"index": 5333,
"step-1": "<mask token>\n\n\nclass TestVerified(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-2": "<mask token>\n\n\nclass TestStatement(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVerified(TestCase):\n\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-3": "<mask token>\n\n\nclass TestStatement(TestCase):\n <mask token>\n\n def tearDown(self):\n del self.instance\n <mask token>\n\n\nclass TestVerified(TestCase):\n\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-4": "from unittest import TestCase\nfrom angelos.document.statements import Statement, Verified, Trusted, Revoked\n\n\nclass TestStatement(TestCase):\n\n def setUp(self):\n self.instance = Statement()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestVerified(TestCase):\n\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n",
"step-5": "#\n# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.\n#\n# This software is available under the terms of the MIT license. Parts are licensed under\n# different terms if stated. The legal terms are attached to the LICENSE file and are\n# made available on:\n#\n# https://opensource.org/licenses/MIT\n#\n# SPDX-License-Identifier: MIT\n#\n# Contributors:\n# Kristoffer Paulsson - initial implementation\n#\nfrom unittest import TestCase\n\nfrom angelos.document.statements import Statement, Verified, Trusted, Revoked\n\n\nclass TestStatement(TestCase):\n def setUp(self):\n self.instance = Statement()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestVerified(TestCase):\n def setUp(self):\n self.instance = Verified()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestTrusted(TestCase):\n def setUp(self):\n self.instance = Trusted()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())\n\n\nclass TestRevoked(TestCase):\n def setUp(self):\n self.instance = Revoked()\n\n def tearDown(self):\n del self.instance\n\n def test_apply_rules(self):\n self.assertTrue(self.instance.apply_rules())",
"step-ids": [
9,
13,
14,
17,
18
]
}
|
[
9,
13,
14,
17,
18
] |
from mininet.cli import CLI
from mininet.term import makeTerms
from mininet.util import irange
from log import log
from utils import (UITextStyle, display)
from dijkstra import (get_routing_decision, get_route_cost)
# Check if route directly connects two switches
def isDirect(route):
    """Return True when *route* is a direct link: exactly two switches, no intermediate hops."""
    return len(route) == 2
# Add purple background for indirect routes
def brightLabel(text):
    """Wrap *text* in a purple-background terminal escape so indirect routes stand out."""
    styled = UITextStyle.BackgroundColor.purple + str(text) + UITextStyle.Format.reset
    return styled
# Execute commands one by one
def __wait__(*commandList):
    """Run each command in *commandList* in order, pausing for <Return> between steps.

    Each command is invoked with an empty-string argument, matching the
    do_* command signature used by the CLI class below.
    """
    steps = len(commandList)
    for i in range(steps):
        commandList[i]('')
        display.prompt('\n\nPress <Return> to continue (%s/%s)' %
                       (i + 1, steps))
        # Pause until the user presses <Return>; the typed text is discarded.
        # BUG FIX: the original used a bare `except:` (which also swallowed
        # KeyboardInterrupt, so Ctrl-C could not abort the sequence) and
        # kept the input in an unused variable. Only EOF is ignored now.
        # NOTE(review): under Python 2 input() *evaluates* the typed text;
        # raw_input() would be safer there — confirm target interpreter.
        try:
            input('')
        except EOFError:
            pass
# Mininet Command Line Interface extension
# Mininet Command Line Interface extension
class DongPhamTestCli(CLI):
    # CLI subclass adding routing-table, OpenFlow, and per-node diagnostic
    # commands. Every do_<name> method is exposed as the interactive
    # command "<name>" at the custom prompt. Methods deliberately use `#`
    # comments instead of docstrings: cmd.Cmd surfaces do_* docstrings as
    # runtime `help` output, which would change the CLI's visible behavior.
    #
    # NOTE(review): the trailing-comma print statements in the table
    # commands below are Python 2 idioms (suppress the newline); under
    # Python 3 they print tuples / extra newlines instead — confirm the
    # target interpreter.
    prompt = 'dongpham> '
    def __init__(self, _mininet, _env):
        # Stash environment and network handles before CLI.__init__ starts
        # the interactive loop (it blocks until the user exits).
        self.env = _env
        self.net = _mininet
        self._testCLI = {}
        CLI.__init__(self, _mininet)
    # Tell the controller to do a command; suppress echo when quiet=True.
    def do(self, shell, quiet=False):
        if (quiet):
            return self.mn.controller.cmd(shell)
        return self.mn.controller.cmdPrint(shell)
    # Like do(), but with command highlighting toggled around the output.
    def doPrint(self, shell):
        display.cmdHighlight(True)
        self.mn.controller.cmdPrint(shell)
        display.cmdHighlight(False)
    # Run all commands in the wait list, pausing for <Return> between each.
    def do_all(self, _):
        __wait__(
            # Show ip
            self.do_ips,
            # Routing commands
            self.do_weights, self.do_costs, self.do_routes, self.do_paths,
            # Flow commands
            self.do_flows, self.do_stats
        )
    # Show object info (dir() of each named node; all nodes when none given)
    # info [node1, node2, ...]
    def do_info(self, line):
        locals = self.getLocals()
        _nodes = line.split()
        display.section("All functions")
        if not (_nodes):
            _nodes = self.mn.keys()
        for n in _nodes:
            # NOTE(review): break (not continue) stops at the first unknown
            # node name, silently skipping the rest — confirm intended.
            if not (locals.__contains__(n)):
                break
            obj = locals[n]
            display.subsection('%s (%s)' % (n, obj.IP()))
            print(dir(obj))
    # Show IP addresses (controller/switch IPs; host IP+MAC pairs)
    # ips
    def do_ips(self, _):
        display.section("IP Addresses")
        locals = self.getLocals()
        # One line per node: name and IP.
        def showIP(*keys):
            for key in keys:
                display.message('%s\t%s' % (key.name, key.IP()))
        # One line per node: name, IP and MAC.
        def showAll(*keys):
            for key in keys:
                display.message('%s\t%s\t%s' % (key.name, key.IP(), key.MAC()))
        # For each node
        display.subsection('Controllers')
        for c in self.mn.controllers:
            showIP(locals[c.name])
        display.subsection('Switches')
        for s in self.mn.switches:
            showIP(locals[s.name])
        display.subsection('Hosts')
        for h in self.mn.hosts:
            showAll(locals[h.name])
    #MARK: - Routing
    # Show link weights
    # weights
    def do_weights(self, _):
        display.section("Weights")
        log.infoln('Link\t\tWeight')
        log.infoln('--------------------')
        # _slinks appears to be the custom topology's weighted-link list of
        # (switch-index i, switch-index j, weight) tuples — it is a private
        # attribute of the topo; confirm against the topology class.
        for (i, j, w) in self.mn.topo._slinks:
            log.infoln('{s%s, s%s}\t%s' % (i, j, w))
    # Show costs of reaching every other switch
    # costs
    def do_costs(self, _):
        # Algorithm input: switch names plus ('sI', 'sJ', weight) triples.
        switches = self.mn.topo.switches()
        weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
                   for i in self.mn.topo._slinks]
        # Print cost of reaching 'end' switch from 'start' switch
        display.section("Total path costs")
        print('From\\To'), ('\t'.join(switches))
        for start in switches:
            print(start + '\t'),
            for end in switches:
                if (start == end):
                    print('--\t'),
                    continue
                route = get_routing_decision(start, weights, end)
                cost = get_route_cost([route])
                if (isDirect(route)):
                    # Print result for directly connected switches
                    print(cost),
                else:
                    # Print and highlight routes with intermediate switches
                    print(brightLabel(cost)),
                print('\t'),
            print('')
    # Show least-cost paths from every switch to every other switch
    # routes
    def do_routes(self, _):
        # Algorithm input: switch names plus ('sI', 'sJ', weight) triples.
        switches = self.mn.topo.switches()
        weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
                   for i in self.mn.topo._slinks]
        # Print next hop switch
        display.section("First-Hop with lowest cost")
        print('From\\To\t'), ('\t'.join(switches))
        for start in switches:
            print(start + '\t'),
            for end in switches:
                if (start == end):
                    print('--\t'),
                    continue
                route = get_routing_decision(start, weights, end)
                if (isDirect(route)):
                    # Print result for directly connected switches
                    print(end),
                else:
                    # Print and highlight routes with intermediate switches
                    # (route[1] is the first hop after the start switch).
                    print(brightLabel(route[1])),
                print('\t'),
            print('')
    # Show the complete shortest path from one switch to every other switch
    # paths
    def do_paths(self, line):
        # Algorithm input: switch names plus ('sI', 'sJ', weight) triples.
        switches = self.mn.topo.switches()
        weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
                   for i in self.mn.topo._slinks]
        # Least cost paths to every node
        display.section("Least-cost paths to other nodes")
        display.message('From -> To\tCost\t\tFull Shortest Path')
        for start in switches:
            display.subsection('%s' % start)
            for end in switches:
                if (start == end):
                    continue
                route = get_routing_decision(start, weights, end)
                cost = get_route_cost([route])
                display.message('%s -> %s\t%s\t\t%s' %
                                (start, end, cost, route))
    #MARK: - OpenFlow
    # Display flows (dump every OpenFlow flow entry from every switch)
    # flows
    def do_flows(self, _line):
        display.section("Showing all flows of all OVSSwitches")
        for s in self.mn.switches:
            self.doPrint('sudo ovs-ofctl dump-flows %s' % s)
    # Delete flows (clear every switch's flow table)
    # deleteFlows
    def do_deleteFlows(self, _line):
        display.section("Deleting all flows of all OVSSwitches")
        for s in self.mn.switches:
            self.doPrint('sudo ovs-ofctl del-flows %s' % s)
    # Display flow statistics
    # stats
    def do_stats(self, _):
        display.section("OpenFlow: Sent/Received Packets")
        display.message(
            'Packets passing through a switch on the way host with IP address = "nw_dst"')
        for s in self.mn.switches:
            display.subsection('%s - Traffic' % s.name)
            # grep keeps only flow entries that actually carried packets
            # toward a 10.10.x.x destination address.
            self.doPrint(
                'sudo ovs-ofctl dump-flows %s | grep -e "n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]" -To' % (s.name))
    # MARK: - Run on every node
    # arps — show each host's ARP cache
    def do_arps(self, _line):
        display.section("ARP caches of all hosts")
        sh = 'arp -a'
        for h in self.mn.hosts:
            h.cmdPrint(sh)
    # netstats — kernel routing tables on every host and the controller
    def do_netstats(self, _line):
        display.section("Routing Tables")
        sh = 'netstat -rn'
        display.subsection('Hosts')
        for h in self.mn.hosts:
            h.cmdPrint(sh)
        display.subsection('Controller')
        self.doPrint(sh)
    # ifconfigs — interface configuration on every host and the controller
    def do_ifconfigs(self, _line):
        display.section("Showing Interface Configuration")
        sh = 'ifconfig -a'
        display.subsection('Hosts')
        for h in self.mn.hosts:
            h.cmdPrint(sh)
        display.subsection('Controller')
        self.doPrint(sh)
    #MARK: - Other
    # Install hard-coded IP and ARP forwarding flows for the two-switch
    # "Tiny Network" test topology.
    def do_xxx_testFlows1(self, _line):
        display.section("Adding test flows to Tiny Network")
        self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')
        self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')
        self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')
        self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')
        self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')
    # NOTE(review): tcpdump runs in the foreground per host; each capture
    # must be interrupted before the next host's starts.
    def do_xxx_traffic(self, _line):
        # display.section("Monitoring sent and received packets of all hosts")
        for h in self.mn.hosts:
            h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)
    # Open xterm windows for a fixed node set; track them in self.mn.terms
    # so Mininet can clean them up on exit.
    def do_xxx_xterms(self, _line):
        locals = self.getLocals()
        terms = makeTerms([locals[name]
                           for name in ['h1', 'h2', 's1', 's2']])
        self.mn.terms += terms
    # Launch Wireshark on each named node (all nodes when none given).
    def do_xxx_sharks(self, line):
        display.section("Launching Wireshark")
        sh = 'sudo wireshark &'
        locals = self.getLocals()
        _nodes = line.split()
        if not (_nodes):
            _nodes = self.mn.keys()
        for n in _nodes:
            # NOTE(review): break (not continue) stops at the first unknown
            # node name — same pattern as do_info; confirm intended.
            if not (locals.__contains__(n)):
                break
            obj = locals[n]
            obj.cmdPrint(sh)
|
normal
|
{
"blob_id": "7636925982434b12307383ba7b01f931f7ea6e24",
"index": 5927,
"step-1": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n <mask token>\n <mask token>\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n <mask token>\n <mask token>\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, 
_line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n <mask token>\n <mask token>\n <mask token>\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-2": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n <mask token>\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n 
if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n 
for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n <mask token>\n <mask token>\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-3": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n def do(self, shell, quiet=False):\n if quiet:\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == 
end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e 
\"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_xxx_testFlows1(self, _line):\n display.section('Adding test flows to Tiny Network')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-4": "from mininet.cli import CLI\nfrom mininet.term import makeTerms\nfrom mininet.util import irange\nfrom log import log\nfrom utils import UITextStyle, display\nfrom dijkstra import get_routing_decision, get_route_cost\n\n\ndef isDirect(route):\n return len(route) == 2\n\n\ndef brightLabel(text):\n return UITextStyle.BackgroundColor.purple + str(text\n ) + UITextStyle.Format.reset\n\n\ndef __wait__(*commandList):\n steps = len(commandList)\n for i in range(steps):\n commandList[i]('')\n display.prompt('\\n\\nPress <Return> to continue (%s/%s)' % (i + 1,\n steps))\n try:\n x = input('')\n except:\n x = ''\n\n\nclass DongPhamTestCli(CLI):\n prompt = 'dongpham> '\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n def do(self, shell, quiet=False):\n if quiet:\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n 
display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in 
self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_xxx_testFlows1(self, _line):\n display.section('Adding test flows to Tiny Network')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, 
_line):\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-5": "\nfrom mininet.cli import CLI\nfrom mininet.term import makeTerms\nfrom mininet.util import irange\n\nfrom log import log\nfrom utils import (UITextStyle, display)\n\nfrom dijkstra import (get_routing_decision, get_route_cost)\n\n# Check if route directly connects two switches\ndef isDirect(route):\n return (len(route) == 2)\n# Add purple background for indirect routes\n\n\ndef brightLabel(text):\n return (UITextStyle.BackgroundColor.purple + str(text) + UITextStyle.Format.reset)\n# Execute commands one by one\n\n\ndef __wait__(*commandList):\n steps = len(commandList)\n for i in range(steps):\n commandList[i]('')\n display.prompt('\\n\\nPress <Return> to continue (%s/%s)' %\n (i + 1, steps))\n try:\n x = input('')\n except:\n x = ''\n\n\n# Mininet Command Line Interface extension\nclass DongPhamTestCli(CLI):\n prompt = 'dongpham> '\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n # Tell the controller to do a command\n def do(self, shell, quiet=False):\n if (quiet):\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n # Run all commands in the wait list\n def do_all(self, _):\n __wait__(\n # Show ip\n self.do_ips,\n # Routing commands\n self.do_weights, self.do_costs, self.do_routes, self.do_paths,\n # Flow commands\n self.do_flows, self.do_stats\n )\n\n # Show object info\n # info [node1, node2, ...]\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section(\"All functions\")\n if not (_nodes):\n _nodes = self.mn.keys()\n for n in _nodes:\n if not (locals.__contains__(n)):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n # Show IP addresses\n # ips\n def do_ips(self, _):\n display.section(\"IP Addresses\")\n locals = 
self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n # For each node\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n #MARK: - Routing\n # Show link weights\n # weights\n def do_weights(self, _):\n display.section(\"Weights\")\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for (i, j, w) in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n # Show costs of reaching every other switch\n # costs\n def do_costs(self, _):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Print cost of reaching 'end' switch from 'start' switch\n display.section(\"Total path costs\")\n print('From\\\\To'), ('\\t'.join(switches))\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if (start == end):\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if (isDirect(route)):\n # Print result for directly connected switches\n print(cost),\n else:\n # Print and highlight routes with intermediate switches\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n # Show least-cost paths from every switch to every other switch\n # routes\n def do_routes(self, _):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Print next hop switch\n display.section(\"First-Hop with lowest cost\")\n print('From\\\\To\\t'), ('\\t'.join(switches))\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n 
if (start == end):\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if (isDirect(route)):\n # Print result for directly connected switches\n print(end),\n else:\n # Print and highlight routes with intermediate switches\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n # Show the complete shortest path from one switch to every other switch\n # paths\n def do_paths(self, line):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Least cost paths to every node\n display.section(\"Least-cost paths to other nodes\")\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if (start == end):\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' %\n (start, end, cost, route))\n\n #MARK: - OpenFlow\n # Display flows\n # flows\n def do_flows(self, _line):\n display.section(\"Showing all flows of all OVSSwitches\")\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n # Delete flows\n # deleteFlows\n\n def do_deleteFlows(self, _line):\n display.section(\"Deleting all flows of all OVSSwitches\")\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n # Display flow statistics\n # stats\n\n def do_stats(self, _):\n display.section(\"OpenFlow: Sent/Received Packets\")\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"')\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To' % (s.name))\n\n # MARK: - Run on every node\n # arps\n def do_arps(self, _line):\n display.section(\"ARP caches of all hosts\")\n sh = 'arp -a'\n for h in 
self.mn.hosts:\n h.cmdPrint(sh)\n # netstats\n\n def do_netstats(self, _line):\n display.section(\"Routing Tables\")\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n # ifconfigs\n\n def do_ifconfigs(self, _line):\n display.section(\"Showing Interface Configuration\")\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n #MARK: - Other\n def do_xxx_testFlows1(self, _line):\n display.section(\"Adding test flows to Tiny Network\")\n self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n\n self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n #\t\tdisplay.section(\"Monitoring sent and received packets of all hosts\")\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name]\n for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section(\"Launching Wireshark\")\n sh = 'sudo wireshark &'\n\n locals = self.getLocals()\n _nodes = line.split()\n if not (_nodes):\n _nodes = self.mn.keys()\n for n in _nodes:\n if not (locals.__contains__(n)):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-ids": [
10,
18,
21,
26,
27
]
}
|
[
10,
18,
21,
26,
27
] |
# Convert ``num`` to base 16 by repeated division (least-significant digit
# first), replacing the original four hand-unrolled division steps and their
# chained str.replace() digit translation with a single divmod loop.
num = 15850
base = 16

# Decimal digit values 10-15 rendered as hexadecimal letters.
_HEX_LETTERS = {10: "a", 11: "b", 12: "c", 13: "d", 14: "e", 15: "f"}


def to_base16(value, width=4):
    """Return *value* as a ``width``-digit base-16 string.

    Each step takes quotient and remainder with divmod; remainders of 10-15
    become the letters a-f, exactly as the original replace chains did.
    Values needing more than ``width`` digits are truncated to the low
    ``width`` digits, matching the original fixed four-step algorithm.
    """
    digits = []
    for _ in range(width):
        value, remainder = divmod(value, base)
        digits.append(_HEX_LETTERS.get(remainder, str(remainder)))
    # Digits were produced least-significant first; print most-significant first.
    return "".join(reversed(digits))


print("{} = {}".format(num, to_base16(num)))
|
normal
|
{
"blob_id": "2d72f063362aaefdc236e1240020c71bacaf51cf",
"index": 8057,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('{} = {}{}{}{}'.format(num, bit4, bit3, bit2, bit1))\n",
"step-3": "num = 15850\nbase = 16\nresiduo = num % base\ncociente = num // base\nbit1 = str(residuo)\nbit1 = bit1.replace('10', 'a')\nbit1 = bit1.replace('11', 'b')\nbit1 = bit1.replace('12', 'c')\nbit1 = bit1.replace('13', 'd')\nbit1 = bit1.replace('14', 'e')\nbit1 = bit1.replace('15', 'f')\nresiduo = cociente % base\ncociente = cociente // base\nbit2 = str(residuo)\nbit2 = bit2.replace('10', 'a')\nbit2 = bit2.replace('11', 'b')\nbit2 = bit2.replace('12', 'c')\nbit2 = bit2.replace('13', 'd')\nbit2 = bit2.replace('14', 'e')\nbit2 = bit2.replace('15', 'f')\nresiduo = cociente % base\ncociente = cociente // base\nbit3 = str(residuo)\nbit3 = bit3.replace('10', 'a')\nbit3 = bit3.replace('11', 'b')\nbit3 = bit3.replace('12', 'c')\nbit3 = bit3.replace('13', 'd')\nbit3 = bit3.replace('14', 'e')\nbit3 = bit3.replace('15', 'f')\nresiduo = cociente % base\ncociente = cociente // base\nbit4 = str(residuo)\nbit4 = bit4.replace('10', 'a')\nbit4 = bit4.replace('11', 'b')\nbit4 = bit4.replace('12', 'c')\nbit4 = bit4.replace('13', 'd')\nbit4 = bit4.replace('14', 'e')\nbit4 = bit4.replace('15', 'f')\nprint('{} = {}{}{}{}'.format(num, bit4, bit3, bit2, bit1))\n",
"step-4": "num = 15850\nbase = 16\n\n# Primera división\nresiduo = num % base\ncociente = num // base\nbit1 = str(residuo)\n\nbit1 = bit1.replace(\"10\",\"a\")\nbit1 = bit1.replace(\"11\",\"b\")\nbit1 = bit1.replace(\"12\",\"c\")\nbit1 = bit1.replace(\"13\",\"d\")\nbit1 = bit1.replace(\"14\",\"e\")\nbit1 = bit1.replace(\"15\",\"f\")\n\n\n# Segunda división\nresiduo = cociente % base\ncociente = cociente // base\nbit2 = str(residuo)\n\nbit2 = bit2.replace(\"10\",\"a\")\nbit2 = bit2.replace(\"11\",\"b\")\nbit2 = bit2.replace(\"12\",\"c\")\nbit2 = bit2.replace(\"13\",\"d\")\nbit2 = bit2.replace(\"14\",\"e\")\nbit2 = bit2.replace(\"15\",\"f\")\n\n\n# Tercera división\nresiduo = cociente % base\ncociente = cociente // base\nbit3 = str(residuo)\n\nbit3 = bit3.replace(\"10\",\"a\")\nbit3 = bit3.replace(\"11\",\"b\")\nbit3 = bit3.replace(\"12\",\"c\")\nbit3 = bit3.replace(\"13\",\"d\")\nbit3 = bit3.replace(\"14\",\"e\")\nbit3 = bit3.replace(\"15\",\"f\")\n\n\n# Cuarta división\nresiduo = cociente % base\ncociente = cociente // base\nbit4 = str(residuo)\n\nbit4 = bit4.replace(\"10\",\"a\")\nbit4 = bit4.replace(\"11\",\"b\")\nbit4 = bit4.replace(\"12\",\"c\")\nbit4 = bit4.replace(\"13\",\"d\")\nbit4 = bit4.replace(\"14\",\"e\")\nbit4 = bit4.replace(\"15\",\"f\")\n\n\nprint(\"{} = {}{}{}{}\".format(num,bit4,bit3,bit2,bit1))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import struct

# Bug fix: the original called bin() on floats, which raises TypeError
# because bin() only accepts integers. Presumably the intent was to show
# the binary representation of each float (TODO confirm); here the float
# is reinterpreted as its raw IEEE-754 bit pattern via struct.
positivo = float(1.0000001)
negativo = float(-1.000001)
print(negativo, positivo)


def float_to_bin(value):
    """Return the 64-bit IEEE-754 double bit pattern of *value* as a binary string."""
    # ">d" packs the float big-endian; ">Q" reads the same 8 bytes back as
    # an unsigned integer, which format() can render as 64 binary digits.
    (bits,) = struct.unpack(">Q", struct.pack(">d", value))
    return format(bits, "064b")


b_pos = float_to_bin(positivo)
b_neg = float_to_bin(negativo)
print(b_neg, b_pos)
|
normal
|
{
"blob_id": "5c908697000247056bb63a443f837eef88b4c957",
"index": 9196,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(negativo, positivo)\n<mask token>\nprint(b_neg, b_pos)\n",
"step-3": "positivo = float(1.0000001)\nnegativo = float(-1.000001)\nprint(negativo, positivo)\nb_pos = bin(positivo)\nb_neg = bin(negativo)\nprint(b_neg, b_pos)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys, os
def resource_path(relative_path):
    """Get absolute path to resource, works for dev and for PyInstaller."""
    # PyInstaller unpacks bundled files into a temp dir recorded in
    # sys._MEIPASS; in a normal dev run that attribute is absent and we
    # resolve relative to the current working directory instead.
    base_path = getattr(sys, "_MEIPASS", None)
    if base_path is None:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
def numStrip(n):
    """Return the decimal digits of n, least-significant digit first.

    Zero yields [0]; positive integers yield their digits in reverse
    reading order (e.g. 1234 -> [4, 3, 2, 1]).
    """
    if n == 0:
        return [0]
    digits = []
    while n > 0:
        n, digit = divmod(n, 10)
        digits.append(digit)
    return digits
|
normal
|
{
"blob_id": "5fb3905abf958f0a8be41cd6ad07efb2a0cf6c66",
"index": 7542,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative_path)\n\n\ndef numStrip(n):\n striped = []\n if n == 0:\n return [0]\n while n > 0:\n striped.append(n % 10)\n n //= 10\n return striped\n",
"step-4": "import sys, os\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative_path)\n\n\ndef numStrip(n):\n striped = []\n if n == 0:\n return [0]\n while n > 0:\n striped.append(n % 10)\n n //= 10\n return striped\n",
"step-5": "import sys, os\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\ndef numStrip(n):\n striped = []\n if n == 0:\n return [0]\n while n > 0:\n striped.append(n % 10)\n n //= 10\n \n return striped\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datastructure
import wordUri
class Question:
    """Walks a dependency-parsed question and emits (subject, verb, object)
    triples as negated rdf/3 facts for an Otter input file.

    nlp looks like a spaCy pipeline (tokens expose .dep_, .orth_, .pos_,
    .children) -- TODO confirm; nounArray/verbArray are lookup structures
    exposing findWord(); otter is an open, writable file object.
    """
    def __init__(self, nlp, otter, nounArray, verbArray):
        self.nlp = nlp                # parser: maps a sentence string to a token doc
        self.nounArray = nounArray    # known nouns, queried via findWord()
        self.verbArray = verbArray    # known verbs, queried via findWord()
        self.file = otter             # output file for writeOtter()

    def findFirst(self, sentence):
        """Parse the sentence and start extraction at the ROOT token
        (the main verb). Only the first ROOT found is processed."""
        sentenceDoc = self.nlp(sentence)
        for word in sentenceDoc:
            if word.dep_ == "ROOT":
                verb = self.verbArray.findWord(word.orth_)

                children = []
                for ch in word.children:
                    children.append(ch)
                self.findSecond(sentenceDoc, verb, children)
                break

    def findSecond(self, sentenceDoc, verb, children):
        """Find the subject among the root's children (attr/nsubj) and hand
        off to findThird. Only the first matching child is used."""
        for child in children:
            if child.dep_ == "attr" or child.dep_ == "nsubj":
                temp = self.nounArray.findWord(child.orth_)

                subjectChildren = []
                for ch in child.children:
                    subjectChildren.append(ch)

                if not subjectChildren:
                    # Subject has no children of its own: fall back to the
                    # root's remaining children, minus the subject itself.
                    # NOTE(review): this mutates the caller's list in place.
                    subjectChildren = children
                    subjectChildren.remove(child)
                self.findThird(sentenceDoc, temp, verb, subjectChildren, False)
                break

    def findThird(self, sentenceDoc, subject, verb, children, flag):
        """Emit one triple per object-like child (appos/pobj) and recurse
        through prep/acomp children. The verb is replaced by the prep/acomp
        token only on the first level; flag=True marks that the replacement
        has already happened."""
        for child in children:
            if child.dep_ == "appos" or child.dep_ == "pobj":
                temp = self.nounArray.findWord(child.orth_)
                if temp is None:
                    # Unknown noun: build a fresh Word and resolve its URI.
                    w = datastructure.Word(child.orth_)
                    w.addType(child.pos_)
                    w.addUri(wordUri.findUri(w))
                    #w.addUri(w.word + "URI")
                    print(subject.uri, "- " + verb.uri + " -", w.uri)

                    self.writeOtter(subject.uri, verb.uri, w.uri)

                else:
                    print(subject.uri, "- " + verb.uri + " -", temp.uri)
                    self.writeOtter(subject.uri, verb.uri, temp.uri)

                #self.recoursiveFind(sentenceDoc, subject, verb, child)
            if child.dep_ == "prep" or child.dep_ == "acomp":
                if not flag:
                    verb = datastructure.Word(child.orth_)
                    verb.addType(child.pos_)
                    verb.addUri(wordUri.findUri(verb))

                verbChildren = []
                for ch in child.children:
                    verbChildren.append(ch)

                self.findThird(sentenceDoc, subject, verb, verbChildren, True)

    def writeOtter(self, first, second, third):
        """Append one negated rdf(subject, predicate, object) fact."""
        self.file.write("-rdf(\"" + first + "\", \"" + second + "\", \"" + third + "\").\n")
|
normal
|
{
"blob_id": "4d63a5f09164b78faa731af6dce41969edc2c4f5",
"index": 848,
"step-1": "<mask token>\n\n\nclass Question:\n <mask token>\n <mask token>\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n <mask token>\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n",
"step-2": "<mask token>\n\n\nclass Question:\n <mask token>\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == 'ROOT':\n verb = self.verbArray.findWord(word.orth_)\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n <mask token>\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n",
"step-3": "<mask token>\n\n\nclass Question:\n\n def __init__(self, nlp, otter, nounArray, verbArray):\n self.nlp = nlp\n self.nounArray = nounArray\n self.verbArray = verbArray\n self.file = otter\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == 'ROOT':\n verb = self.verbArray.findWord(word.orth_)\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n\n def findThird(self, sentenceDoc, subject, verb, children, flag):\n for child in children:\n if child.dep_ == 'appos' or child.dep_ == 'pobj':\n temp = self.nounArray.findWord(child.orth_)\n if temp is None:\n w = datastructure.Word(child.orth_)\n w.addType(child.pos_)\n w.addUri(wordUri.findUri(w))\n print(subject.uri, '- ' + verb.uri + ' -', w.uri)\n self.writeOtter(subject.uri, verb.uri, w.uri)\n else:\n print(subject.uri, '- ' + verb.uri + ' -', temp.uri)\n self.writeOtter(subject.uri, verb.uri, temp.uri)\n if child.dep_ == 'prep' or child.dep_ == 'acomp':\n if not flag:\n verb = datastructure.Word(child.orth_)\n verb.addType(child.pos_)\n verb.addUri(wordUri.findUri(verb))\n verbChildren = []\n for ch in child.children:\n verbChildren.append(ch)\n self.findThird(sentenceDoc, subject, verb, verbChildren, True)\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n",
"step-4": "import datastructure\nimport wordUri\n\n\nclass Question:\n\n def __init__(self, nlp, otter, nounArray, verbArray):\n self.nlp = nlp\n self.nounArray = nounArray\n self.verbArray = verbArray\n self.file = otter\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == 'ROOT':\n verb = self.verbArray.findWord(word.orth_)\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n\n def findThird(self, sentenceDoc, subject, verb, children, flag):\n for child in children:\n if child.dep_ == 'appos' or child.dep_ == 'pobj':\n temp = self.nounArray.findWord(child.orth_)\n if temp is None:\n w = datastructure.Word(child.orth_)\n w.addType(child.pos_)\n w.addUri(wordUri.findUri(w))\n print(subject.uri, '- ' + verb.uri + ' -', w.uri)\n self.writeOtter(subject.uri, verb.uri, w.uri)\n else:\n print(subject.uri, '- ' + verb.uri + ' -', temp.uri)\n self.writeOtter(subject.uri, verb.uri, temp.uri)\n if child.dep_ == 'prep' or child.dep_ == 'acomp':\n if not flag:\n verb = datastructure.Word(child.orth_)\n verb.addType(child.pos_)\n verb.addUri(wordUri.findUri(verb))\n verbChildren = []\n for ch in child.children:\n verbChildren.append(ch)\n self.findThird(sentenceDoc, subject, verb, verbChildren, True)\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n",
"step-5": "import datastructure\nimport wordUri\n\n\nclass Question:\n def __init__(self, nlp, otter, nounArray, verbArray):\n self.nlp = nlp\n self.nounArray = nounArray\n self.verbArray = verbArray\n self.file = otter\n\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == \"ROOT\":\n verb = self.verbArray.findWord(word.orth_)\n\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n\n for child in children:\n if child.dep_ == \"attr\" or child.dep_ == \"nsubj\":\n temp = self.nounArray.findWord(child.orth_)\n\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n\n def findThird(self, sentenceDoc, subject, verb, children, flag):\n for child in children:\n if child.dep_ == \"appos\" or child.dep_ == \"pobj\":\n temp = self.nounArray.findWord(child.orth_)\n if temp is None:\n w = datastructure.Word(child.orth_)\n w.addType(child.pos_)\n w.addUri(wordUri.findUri(w))\n #w.addUri(w.word + \"URI\")\n print(subject.uri, \"- \" + verb.uri + \" -\", w.uri)\n\n self.writeOtter(subject.uri, verb.uri, w.uri)\n\n else:\n print(subject.uri, \"- \" + verb.uri + \" -\", temp.uri)\n self.writeOtter(subject.uri, verb.uri, temp.uri)\n\n #self.recoursiveFind(sentenceDoc, subject, verb, child)\n if child.dep_ == \"prep\" or child.dep_ == \"acomp\":\n if not flag:\n verb = datastructure.Word(child.orth_)\n verb.addType(child.pos_)\n verb.addUri(wordUri.findUri(verb))\n\n verbChildren = []\n for ch in child.children:\n verbChildren.append(ch)\n\n self.findThird(sentenceDoc, subject, verb, verbChildren, True)\n\n def writeOtter(self, first, second, third):\n self.file.write(\"-rdf(\\\"\" + first + \"\\\", \\\"\" + second 
+ \"\\\", \\\"\" + third + \"\\\").\\n\")\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
# boj, 9237 : 이장님 초대, python3
# 그리디 알고리즘
import sys
def tree(l):
    """Return the day the last tree finishes growing.

    Seedling at 0-based position i is planted on day i+1 and needs l[i]
    days to grow, so it is ready on day i + l[i] + 2. Raises ValueError
    on an empty list, like the original max() over no elements.
    """
    finish_days = []
    for position, growth in enumerate(l):
        finish_days.append(position + growth + 2)
    return max(finish_days)
N = int(sys.stdin.readline())  # number of seedlings (read but not used beyond this line)
t = sorted(list(map(int, sys.stdin.readline().split())), reverse = True)  # greedy: plant slowest growers first
print(tree(t))  # earliest day every tree is fully grown
|
normal
|
{
"blob_id": "e79cdd32977eb357c3f6709887b671c50eb1fa45",
"index": 7071,
"step-1": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\n<mask token>\nprint(tree(t))\n",
"step-3": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)\nprint(tree(t))\n",
"step-4": "import sys\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)\nprint(tree(t))\n",
"step-5": "# boj, 9237 : 이장님 초대, python3\n# 그리디 알고리즘\nimport sys\n\ndef tree(l):\n return max([i+j+2 for i,j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse = True)\n\nprint(tree(t))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''
Please Note:
Note: It is intended for some problems to be ambiguous. You should gather all requirements up front before implementing one.
Please think of all the corner cases and clarifications yourself.
Validate if a given string is numeric.
Examples:
1."0" => true
2." 0.1 " => true
3."abc" => false
4."1 a" => false
5."2e10" => true
Return 0 / 1 ( 0 for false, 1 for true ) for this problem
Clarify the question using “See Expected Output”
1. Is 1u (which may be a representation for unsigned integers) valid?
For this problem, no.
2.Is 0.1e10 valid?
Yes
3.-01.1e-10?
Yes
4.Hexadecimal numbers like 0xFF?
Not for the purpose of this problem
5. 3. (. not followed by a digit)?
No
6.Can exponent have decimal numbers? 3e0.1?
Not for this problem.
7.Is 1f ( floating point number with f as prefix ) valid?
Not for this problem.
8.How about 1000LL or 1000L ( C++ representation for long and long long numbers )?
Not for this problem.
9.How about integers preceded by 00 or 0? like 008?
Yes for this problem
'''
class Solution:
    # @param A : string
    # @return an integer (1 if A is a valid number, 0 otherwise)
    def isNumber(self, A):
        """Validate that A is numeric.

        Accepts an optionally-signed integer or decimal, optionally followed
        by 'e' and an optionally-signed integer exponent. Surrounding spaces
        are ignored. Returns 1 for valid, 0 for invalid.
        """
        A = A.strip(' ')
        if len(A) == 0:
            return 0
        # Reject any character that can never appear in a number.
        for c in A:
            if c not in '0123456789.e+-':
                return 0
        if 'e' in A:
            parts = A.split('e')
            # Exactly one 'e': mantissa may be decimal, exponent must be integer.
            if len(parts) != 2:
                return 0
            return int(self.isnum(parts[0], 0) and self.isnum(parts[1], 1))
        return int(self.isnum(A, 0))

    def isnum(self, A, i):
        """Return True if A is a well-formed number.

        i == 1 restricts A to a (signed) integer; i == 0 also allows one
        decimal point, e.g. '1.5' or '.5'. A '.' not followed by a digit
        ('3.') is rejected, matching the problem's clarifications.
        """
        if A == '':
            return False
        if i == 1 or '.' not in A:
            # Integer form: optional single sign, then at least one digit.
            if A[0] in ('+', '-'):
                A = A[1:]
            return A != '' and A.isdigit()
        whole, _, frac = A.partition('.')
        if '.' in frac:
            # More than one decimal point, e.g. '1.2.3'. The previous
            # implementation only checked the first two split('.') parts,
            # so such strings were wrongly accepted.
            return False
        # The integer part may be empty ('.5'); the fractional part may not ('3.').
        return (whole == '' or self.isnum(whole, 1)) and self.isnum(frac, 1)
|
normal
|
{
"blob_id": "50be2cbdaec6ed76e5d9367c6a83222f9153db82",
"index": 7426,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isNumber(self, A):\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n if len(A) == 0:\n return 0\n for c in A:\n if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:\n return 0\n if 'e' in A:\n A = A.split('e')\n if len(A) != 2:\n return 0\n return int(self.isnum(A[0], 0) and self.isnum(A[1], 1))\n return int(self.isnum(A, 0))\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def isNumber(self, A):\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n while len(A) > 0 and A[0] == ' ':\n A = A[1:]\n A = A[::-1]\n if len(A) == 0:\n return 0\n for c in A:\n if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:\n return 0\n if 'e' in A:\n A = A.split('e')\n if len(A) != 2:\n return 0\n return int(self.isnum(A[0], 0) and self.isnum(A[1], 1))\n return int(self.isnum(A, 0))\n\n def isnum(self, A, i):\n if A == '':\n return False\n if i == 1 or i == 0 and '.' not in A:\n if A[0] in ['+', '-']:\n A = A[1:]\n if A == '':\n return False\n for c in A:\n if c not in [str(i) for i in range(10)]:\n return False\n return True\n A = A.split('.')\n return (self.isnum(A[0], 1) or A[0] == '') and self.isnum(A[1], 1)\n",
"step-5": "'''\nPlease Note:\nNote: It is intended for some problems to be ambiguous. You should gather all requirements up front before implementing one.\n\nPlease think of all the corner cases and clarifications yourself.\n\nValidate if a given string is numeric.\n\nExamples:\n\n1.\"0\" => true\n2.\" 0.1 \" => true\n3.\"abc\" => false\n4.\"1 a\" => false\n5.\"2e10\" => true\nReturn 0 / 1 ( 0 for false, 1 for true ) for this problem\n\nClarify the question using “See Expected Output”\n\n1.Is 1u ( which may be a representation for unsigned integers valid?\nFor this problem, no.\n2.Is 0.1e10 valid?\nYes\n3.-01.1e-10?\nYes\n4.Hexadecimal numbers like 0xFF?\nNot for the purpose of this problem\n5. 3. (. not followed by a digit)?\nNo\n6.Can exponent have decimal numbers? 3e0.1?\nNot for this problem.\n7.Is 1f ( floating point number with f as prefix ) valid?\nNot for this problem.\n8.How about 1000LL or 1000L ( C++ representation for long and long long numbers )?\nNot for this problem.\n9.How about integers preceded by 00 or 0? like 008?\nYes for this problem\n'''\nclass Solution:\n # @param A : string\n # @return an integer\n def isNumber(self, A):\n while len(A)>0 and A[0]==' ':\n A = A[1:]\n A=A[::-1]\n while len(A)>0 and A[0]==' ':\n A = A[1:]\n A=A[::-1]\n if len(A)==0:\n return 0\n for c in A:\n if c not in [str(i) for i in range(10)] + ['.', 'e', '-', '+']:\n return 0\n if 'e' in A:\n A = A.split('e')\n if len(A)!=2:\n return 0\n return int(self.isnum(A[0], 0) and self.isnum(A[1], 1))\n return int(self.isnum(A, 0))\n \n def isnum(self, A, i):\n #print(A,i)\n if A=='':\n return False\n if i == 1 or (i == 0 and '.' not in A):\n if A[0] in ['+', '-']:\n A = A[1:]\n if A == '':\n return False\n for c in A:\n if c not in [str(i) for i in range(10)]:\n return False\n return True\n A = A.split('.')\n return (self.isnum(A[0], 1) or A[0]=='') and self.isnum(A[1], 1)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
import time
import unittest
from old import dict_groupby
class TestDictGroupBy(unittest.TestCase):
def setUp(self):
random.seed(0)
self.sut = dict_groupby
def generate_transaction(self):
return {
'transaction_type': random.choice(['a', 'b', 'c']),
'outstanding': random.randint(0, 100)
}
def generate_facility(self):
num_transactions = random.randint(1, 3)
transactions = {}
outstanding = 0
for i in range(num_transactions):
transactions[i] = self.generate_transaction()
outstanding += transactions[i]['outstanding']
return {
'facility_type': random.choice(['a', 'b', 'c']),
'outstanding': outstanding,
'transactions': transactions
}
def generate_facilities(self, num):
out = {}
for i in range(num):
out[i] = self.generate_facility()
return out
def generate_record(self):
return {
'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),
'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
'vcol3': random.randint(0, 2)
}
def test_hierarchical_groupby(self):
input_set = self.generate_facilities(4)
group_columns = ['facility_type', {'transactions': 'transaction_type'}]
print(input_set)
self.sut.DictGroupBy(input_set, group_columns)
def test_groupby_and_sum_speed(self):
data = {}
for i in range(100000):
data[i] = self.generate_record()
print('Generated data.')
group_columns = ['gcol1', 'gcol2', 'gcol3']
t0 = time.time()
gb = dict_groupby.GroupByObj(data, group_columns)
t1 = time.time()
out = gb.sum()
tf = time.time()
# print(out)
print(t1 - t0, tf - t1, tf - t0)
# df = pd.DataFrame(data).T
# t0 = time.time()
# df.groupby(group_columns).sum()
# tf = time.time()
# # print(out)
# print(tf - t0)
|
normal
|
{
"blob_id": "f8e6f6e1be6c4ea306b7770c918b97808a0765b2",
"index": 6580,
"step-1": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n <mask token>\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n <mask token>\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n <mask token>\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-3": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)}\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-4": "import random\nimport time\nimport unittest\nfrom old import dict_groupby\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)}\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-5": "import random\nimport time\nimport unittest\n\nfrom old import dict_groupby\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {\n 'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)\n }\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n\n return {\n 'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding,\n 'transactions': transactions\n }\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {\n 'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),\n 'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)\n }\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n # print(out)\n print(t1 - t0, tf - t1, tf - t0)\n\n # df = pd.DataFrame(data).T\n # t0 = time.time()\n # df.groupby(group_columns).sum()\n # tf = time.time()\n # # print(out)\n # print(tf - t0)",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)  # address pins by Broadcom GPIO number, not board position

#led = 21

# BCM pin numbers of the push buttons being polled.
pins = [21, 25, 18]
# 0 1 2 3 4
# Human-readable label per button, index-aligned with `pins`.
names = ["First", "Second", "Third"]

for x in range(len(pins)):
    # Enable the internal pull-up: the pin reads HIGH when idle, and a press
    # that pulls it to ground reads LOW (False below).
    GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)

#GPIO.setup(led, GPIO.OUT)

# Poll every button forever, reporting each press.
while True:
    input_state = 0
    for i in range(len(pins)):
        input_state = GPIO.input(pins[i])
        if input_state == False:  # LOW -> button pressed (pull-up wiring)
            print('Button {0} Pressed'.format(names[i]))
            time.sleep(0.2)  # crude debounce: ignore contact bounce for 200 ms
            # if (i == 0):
            # print("TURN ON LED")
            # GPIO.output(led, 1)
            # if (i == 1):
            # print("TURN OFF LED")
            # GPIO.output(led, 0)
|
normal
|
{
"blob_id": "d292de887c427e3a1b95d13cef17de1804f8f9ee",
"index": 6535,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\n<mask token>\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n",
"step-3": "<mask token>\nGPIO.setmode(GPIO.BCM)\npins = [21, 25, 18]\nnames = ['First', 'Second', 'Third']\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n",
"step-4": "import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\npins = [21, 25, 18]\nnames = ['First', 'Second', 'Third']\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n",
"step-5": "import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\n#led = 21\n\npins = [21, 25, 18]\n# 0 1 2 3 4\nnames = [\"First\", \"Second\", \"Third\"]\n\nfor x in range(len(pins)):\n GPIO.setup(pins[x], GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n#GPIO.setup(led, GPIO.OUT)\n\n\nwhile True:\n input_state = 0\n for i in range(len(pins)):\n input_state = GPIO.input(pins[i])\n if input_state == False:\n print('Button {0} Pressed'.format(names[i]))\n time.sleep(0.2)\n # if (i == 0):\n # print(\"TURN ON LED\")\n # GPIO.output(led, 1)\n # if (i == 1):\n # print(\"TURN OFF LED\")\n # GPIO.output(led, 0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tcod as libtcod
import color
from input_handlers import consts
from input_handlers.ask_user_event_handler import AskUserEventHandler
class SelectIndexHandler(AskUserEventHandler):
    """Event handler that asks the user to pick a map tile.

    The cursor starts on the player and can be moved with the keyboard
    (modifier keys speed it up) or confirmed with a key or a left click.
    """

    def __init__(self, engine):
        """Place the cursor on the player's current tile."""
        super().__init__(engine)
        player = self.engine.player
        engine.mouse_location = (player.x, player.y)

    def on_render(self, console):
        """Highlight the tile under the cursor."""
        super().on_render(console)
        cursor_x, cursor_y = self.engine.mouse_location
        console.tiles_rgb['bg'][cursor_x, cursor_y] = color.white
        console.tiles_rgb['fg'][cursor_x, cursor_y] = color.black

    def ev_keydown(self, event):
        """Move the cursor on movement keys, confirm on confirm keys."""
        key = event.sym
        if key in consts.MOVE_KEYS:
            # Shift/Ctrl/Alt multiply the cursor step by 5/10/20 respectively,
            # and the factors stack when several modifiers are held.
            step = 1
            speedups = (
                (libtcod.event.KMOD_LSHIFT | libtcod.event.KMOD_RSHIFT, 5),
                (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL, 10),
                (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT, 20),
            )
            for mask, factor in speedups:
                if event.mod & mask:
                    step *= factor
            dx, dy = consts.MOVE_KEYS[key]
            x, y = self.engine.mouse_location
            # Clamp the cursor to the map bounds.
            x = max(0, min(x + dx * step, self.engine.game_map.width - 1))
            y = max(0, min(y + dy * step, self.engine.game_map.height - 1))
            self.engine.mouse_location = (x, y)
            return None
        if key in consts.CONFIRM_KEYS:
            return self.on_index_selected(*self.engine.mouse_location)
        return super().ev_keydown(event)

    def ev_mousebuttondown(self, event):
        """Left click on an in-bounds tile confirms a selection."""
        if self.engine.game_map.in_bounds(*event.tile) and event.button == 1:
            return self.on_index_selected(*event.tile)
        return super().ev_mousebuttondown(event)

    def on_index_selected(self, x, y):
        """Subclasses decide what a selected (x, y) means."""
        raise NotImplementedError()
|
normal
|
{
"blob_id": "8c7dcff80eeb8d7d425cfb25da8a30fc15daf5f9",
"index": 4872,
"step-1": "<mask token>\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n <mask token>\n <mask token>\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n <mask token>\n <mask token>\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-2": "<mask token>\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n <mask token>\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = player.x, player.y\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.\n KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL\n ):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = x, y\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-3": "<mask token>\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n \"\"\"\n Handles asking the user for an index on the map.\n \"\"\"\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = player.x, player.y\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.\n KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL\n ):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = x, y\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-4": "import tcod as libtcod\nimport color\nfrom input_handlers import consts\nfrom input_handlers.ask_user_event_handler import AskUserEventHandler\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n \"\"\"\n Handles asking the user for an index on the map.\n \"\"\"\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = player.x, player.y\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.\n KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL\n ):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = x, y\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-5": "import tcod as libtcod\n\nimport color\nfrom input_handlers import consts\nfrom input_handlers.ask_user_event_handler import AskUserEventHandler\n\n\nclass SelectIndexHandler(AskUserEventHandler):\n \"\"\"\n Handles asking the user for an index on the map.\n \"\"\"\n\n def __init__(self, engine):\n super().__init__(engine)\n player = self.engine.player\n engine.mouse_location = (player.x, player.y)\n\n def on_render(self, console):\n \"\"\"\n Highlight the tile under the cursor.\n \"\"\"\n super().on_render(console)\n x, y = self.engine.mouse_location\n console.tiles_rgb['bg'][x, y] = color.white\n console.tiles_rgb['fg'][x, y] = color.black\n\n def ev_keydown(self, event):\n key = event.sym\n if key in consts.MOVE_KEYS:\n modifier = 1 # Holding modifier keys will speed up key movement\n if event.mod & (libtcod.event.KMOD_LSHIFT | libtcod.event.KMOD_RSHIFT):\n modifier *= 5\n if event.mod & (libtcod.event.KMOD_LCTRL | libtcod.event.KMOD_RCTRL):\n modifier *= 10\n if event.mod & (libtcod.event.KMOD_LALT | libtcod.event.KMOD_RALT):\n modifier *= 20\n\n x, y = self.engine.mouse_location\n dx, dy = consts.MOVE_KEYS[key]\n x += dx * modifier\n y += dy * modifier\n # Restrict the cursor inddex to the map size.\n x = max(0, min(x, self.engine.game_map.width - 1))\n y = max(0, min(y, self.engine.game_map.height - 1))\n self.engine.mouse_location = (x, y)\n return None\n elif key in consts.CONFIRM_KEYS:\n return self.on_index_selected(*self.engine.mouse_location)\n return super().ev_keydown(event)\n\n def ev_mousebuttondown(self, event):\n \"\"\"\n Left click confirms a selection\n \"\"\"\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)\n\n def on_index_selected(self, x, y):\n raise NotImplementedError()\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
# Public version identifier for this package.
# NOTE(review): format looks like calendar versioning (YY.MM.patch) — confirm
# the release scheme before bumping.
__version__ = '18.07.0'
|
normal
|
{
"blob_id": "3cac7829cf0c07ddc704a25ec3c781c9510a8e0c",
"index": 3613,
"step-1": "<mask token>\n",
"step-2": "__version__ = '18.07.0'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Ask for 7 birth years and report how many of those people are of age
# (at least 21 years old as of 2019).
adults = 0
for _ in range(7):
    birth_year = int(input('Digite o ano de nascimento: '))
    if 2019 - birth_year >= 21:
        adults += 1
print(f'Entre as 7 pessoas, {adults} pessoas são maiores de idade.')
|
normal
|
{
"blob_id": "251d589a5815d77d2bc375d8d4a7d41e79a2a5cd",
"index": 5303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in range(0, 7):\n pe1 = int(input('Digite o ano de nascimento: '))\n pe1 = 2019 - pe1\n if pe1 >= 21:\n num1 = num1 + 1\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')\n",
"step-3": "num1 = 0\nfor c in range(0, 7):\n pe1 = int(input('Digite o ano de nascimento: '))\n pe1 = 2019 - pe1\n if pe1 >= 21:\n num1 = num1 + 1\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')\n",
"step-4": "# Mostra entre as 7 pessoas, quantas pessoas são maiores de idade.\r\n\r\n\r\nnum1 = 0\r\nfor c in range(0,7):\r\n pe1 = int(input('Digite o ano de nascimento: '))\r\n pe1 = 2019 - pe1\r\n if pe1 >= 21:\r\n num1 = num1 + 1\r\nprint(f'Entre as 7 pessoas, {num1} pessoas são maiores de idade.')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from Eutils.pathmagic import context
with context():
import argparse
import numpy as np
from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet
from evaluator.Eutils.pascal_val import PASCAL_VAL
# from evaluator.Eutils.coco_val import COCO_VAL
from evaluator.Eutils.detector import Detector
import utils.config as cfg
from utils.logger import Logger
from utils.config_utils import get_config,ds_config
from tqdm import tqdm
import tensorflow as tf
import copy
import os
# import cv2
# from evaluator.Eutils.draw_result import draw_result
class EVALUATOR(object):
    """PASCAL-VOC-style average-precision evaluator for a detector.

    On construction it runs ``detector`` over every batch of ``data``,
    collects all detections sorted by descending confidence, and builds a
    precision/recall curve against the ground-truth annotations.  Call
    :meth:`eval` to integrate the curve into a single AP value.
    """

    def __init__(self, detector, data):
        # detector must provide detect_batch(images);
        # data must provide gt (total ground-truth box count), num_batch
        # and get_batch().
        self.detector = detector
        self.data = data
        self.gt = self.data.gt
        self.image_ids, self.bboxes, \
        self.prob, self.annotations = self.prepare()
        self.precision, self.recall = self.pr_curve()

    def prepare(self):
        """Run the detector over the whole validation set.

        Returns:
            image_ids: image id per detection, sorted by descending score.
            BB: array of detected boxes, same order as image_ids.
            sorted_prob: detection confidences, descending.
            annotations: image id -> deep-copied ground-truth record
                (its 'bboxes' array plus per-box 'det' matched flags).
                Deep copy matters: pr_curve mutates the 'det' flags.
        """
        image_ids, bboxes, prob = [], [], []
        annotations = {}
        # while img_batch:
        for i in tqdm(range(self.data.num_batch), desc='batch forward'):
            # print("{:5}th batch".format(i))
            img_batch, bbox_batch = self.data.get_batch()
            results = self.detector.detect_batch(img_batch)
            for ii in range(len(results)):
                boxes_filtered, probs_filtered = results[ii]
                # Debug visualisation kept for reference:
                # bbox_gt = bbox_batch[ii]['bbox_det']['bboxes']
                # filter_mat_probs = np.array(probs_filtered >= cfg.THRESHOLD, dtype='bool')
                # filter_mat_probs = np.nonzero(filter_mat_probs)
                # boxes_ft_prob = boxes_filtered[filter_mat_probs]
                # probs_ft_prob = probs_filtered[filter_mat_probs]
                # image = img_batch[ii]
                # draw_result(image, bbox_gt, (0, 0, 255))
                # draw_result(image, boxes_ft_prob, (255, 0, 0))
                # cv2.imshow('Image', image)
                # cv2.waitKey(0)
                image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))
                bboxes.extend(boxes_filtered)
                prob.extend(probs_filtered)
                if bbox_batch[ii]['id'] not in annotations:
                    annotations[bbox_batch[ii]['id']] = copy.deepcopy(bbox_batch[ii]['bbox_det'])
        # Rank every detection globally by confidence (highest first).
        sorted_ind = np.argsort(prob)[::-1]
        sorted_prob = np.sort(prob)[::-1]
        BB = np.array(bboxes)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]
        return image_ids, BB, sorted_prob, annotations

    def pr_curve(self):
        """Greedily match ranked detections to ground truth and build P/R.

        Boxes are in (cx, cy, w, h) form.  A detection is a true positive
        when its best IoU against an unmatched ground-truth box of the same
        image exceeds cfg.IOU_THRESHOLD_GT; duplicates of an already-matched
        box count as false positives.

        Returns:
            (prec, rec): cumulative precision and recall arrays, one entry
            per ranked detection.
        """
        nd = len(self.image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in tqdm(range(nd), desc='painting PR curve'):
            # for d in range(nd):
            R = self.annotations[self.image_ids[d]]
            bb = self.bboxes[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bboxes'].astype(float)
            if BBGT.size > 0:
                # compute overlaps
                # intersection (convert center/size to min/max corners)
                ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[2] / 2)
                iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[3] / 2)
                ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[2] / 2)
                iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[3] / 2)
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                # union
                # NOTE(review): the intersection uses the VOC +1 pixel
                # convention but the union areas (w*h) do not — the IoU is
                # slightly inflated. Confirm this matches the training-side
                # IoU definition before changing.
                uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            if ovmax > cfg.IOU_THRESHOLD_GT:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    # Mark this ground-truth box as matched so later
                    # (lower-confidence) duplicates count as false positives.
                    R['det'][jmax] = 1
                else:
                    fp[d] = 1.
            else:
                fp[d] = 1.
        # compute precision recall
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(self.gt)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        return prec, rec

    def eval(self, use_07_metric=False):
        """ ap = eval(rec, prec, [use_07_metric])
        Compute AP given precision and recall.
        If use_07_metric is true, uses the
        VOC 07 11 point method (default:False).
        """
        if use_07_metric:
            # 11 point metric
            ap = 0.
            for t in np.arange(0., 1.1, 0.1):
                if np.sum(self.recall >= t) == 0:
                    p = 0
                else:
                    p = np.max(self.precision[self.recall >= t])
                ap = ap + p / 11.
        else:
            # correct AP calculation
            # first append sentinel values at the end
            mrec = np.concatenate(([0.], self.recall, [1.]))
            mpre = np.concatenate(([0.], self.precision, [0.]))
            # compute the precision envelope
            for i in range(mpre.size - 1, 0, -1):
                mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
            # to calculate area under PR curve, look for points
            # where X axis (recall) changes value
            i = np.where(mrec[1:] != mrec[:-1])[0]
            # and sum (\Delta recall) * prec
            ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap
def main():
    """CLI entry point: evaluate one checkpoint or sweep a log directory.

    Without ``--auto_all`` a single (weight_dir, weights) checkpoint is
    evaluated on PASCAL; with it, every ``hg_yolo-*`` checkpoint found under
    ``../<log_file>`` is evaluated against each configured data source.
    Results are appended via Logger to ``--eval_file``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-ims', '--image_size', default=512, type=int)
    parser.add_argument('-g','--gpu', type=str)
    parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')
    parser.add_argument('-ds', '--data_source', default='all', type=str, choices=['coco', 'pascal', 'all'])
    parser.add_argument('-ef', '--eval_file', type=str, required=True)
    parser.add_argument('-lf', '--log_file', type=str)
    parser.add_argument('-al', '--auto_all', action='store_true')
    # when calculate single model
    parser.add_argument('--weights', default="hg_yolo-240000", type=str)
    parser.add_argument('--weight_dir', default='../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.cpu:
        # An empty device list hides all GPUs, forcing TensorFlow onto CPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if not args.auto_all:
        # Single-checkpoint evaluation.
        strings = get_config(args.weight_dir)
        net = HOURGLASSYOLONet('eval')
        detector = Detector(net, os.path.join(args.weight_dir, args.weights))
        # data = COCO_VAL()
        data = PASCAL_VAL()
        evaluator = EVALUATOR(detector, data)
        ap = evaluator.eval()
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate single ap from {} {}\n'.format(args.weight_dir, args.weights))
        log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(
            data.__class__.__name__, ap, args.weights, strings))
    else:
        # Sweep every model directory under ../<log_file>.
        data_source = ds_config(args)
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate ap from {}\n'.format(args.eval_file))
        model_start = 'hg_yolo'
        rootdir = '../' + args.log_file
        root_list = os.listdir(rootdir)  # list every entry (dirs and files) under the folder
        root_list.sort()
        for path in root_list:
            model_dir = os.path.join(rootdir, path)
            models = os.listdir(model_dir)
            models = filter(lambda x: x.startswith(model_start), models)
            # Strip checkpoint-file extensions, dedupe, then sort by the
            # numeric step suffix after 'hg_yolo-'.
            models = list(set(map(lambda x: x.split('.')[0], models)))
            models.sort(key=lambda x: int(x[8:]))
            for data in data_source:
                for model in models:
                    strings = get_config(model_dir)
                    # Fresh graph per checkpoint so variable names don't collide.
                    tf.reset_default_graph()
                    net = HOURGLASSYOLONet('eval')
                    detector = Detector(net, os.path.join(model_dir, model))
                    evaluator = EVALUATOR(detector, data)
                    ap = evaluator.eval()
                    log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'.format(
                        data.__class__.__name__, ap, model, strings))
                    detector.sess.close()
                    # Drop references so the old graph/session can be freed
                    # before the next checkpoint is loaded.
                    del net
                    del detector
                    del evaluator
# Run the evaluator only when executed as a script, not on import.
if __name__ == '__main__':
    main()
# print(os.path.realpath('.'))
# print(os.path.dirname(os.path.realpath('.')))
# print(os.sep)
#
# print(os.path.dirname(os.path.realpath('.')).split(os.sep))
|
normal
|
{
"blob_id": "3bb6305ceb1491db57c7f8b03e438398644c8f90",
"index": 8124,
"step-1": "<mask token>\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 
1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 
1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g', '--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str,\n choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n parser.add_argument('--weights', default='hg_yolo-240000', type=str)\n parser.add_argument('--weight_dir', default=\n '../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n data = PASCAL_VAL()\n 
evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.\n weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(data.\n __class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir)\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'\n .format(data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith context():\n import argparse\n import numpy as np\n from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet\n from evaluator.Eutils.pascal_val import PASCAL_VAL\n from evaluator.Eutils.detector import Detector\n import utils.config as cfg\n from utils.logger import Logger\n from utils.config_utils import get_config, ds_config\n from tqdm import tqdm\n import tensorflow as tf\n import copy\n import os\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 
2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g', '--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str,\n choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n parser.add_argument('--weights', default='hg_yolo-240000', type=str)\n parser.add_argument('--weight_dir', 
default=\n '../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n data = PASCAL_VAL()\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.\n weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(data.\n __class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir)\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'\n .format(data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from Eutils.pathmagic import context\nwith context():\n import argparse\n import numpy as np\n from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet\n from evaluator.Eutils.pascal_val import PASCAL_VAL\n from evaluator.Eutils.detector import Detector\n import utils.config as cfg\n from utils.logger import Logger\n from utils.config_utils import get_config, ds_config\n from tqdm import tqdm\n import tensorflow as tf\n import copy\n import os\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, self.prob, self.annotations = (self.\n prepare())\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(\n bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n if BBGT.size > 0:\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[\n 2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[\n 3] / 2)\n ixmax = 
np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[\n 2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[\n 3] / 2)\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n inters = iw * ih\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 1.0\n R['det'][jmax] = 1\n else:\n fp[d] = 1.0\n else:\n fp[d] = 1.0\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.0\n else:\n mrec = np.concatenate(([0.0], self.recall, [1.0]))\n mpre = np.concatenate(([0.0], self.precision, [0.0]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g', '--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str,\n choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n parser.add_argument('--weights', default='hg_yolo-240000', type=str)\n 
parser.add_argument('--weight_dir', default=\n '../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n data = PASCAL_VAL()\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.\n weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(data.\n __class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir)\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'\n .format(data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from Eutils.pathmagic import context\nwith context():\n import argparse\n import numpy as np\n from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet\n from evaluator.Eutils.pascal_val import PASCAL_VAL\n # from evaluator.Eutils.coco_val import COCO_VAL\n from evaluator.Eutils.detector import Detector\n import utils.config as cfg\n from utils.logger import Logger\n from utils.config_utils import get_config,ds_config\n from tqdm import tqdm\n import tensorflow as tf\n import copy\n import os\n\n\n# import cv2\n# from evaluator.Eutils.draw_result import draw_result\n\n\nclass EVALUATOR(object):\n\n def __init__(self, detector, data):\n self.detector = detector\n self.data = data\n self.gt = self.data.gt\n self.image_ids, self.bboxes, \\\n self.prob, self.annotations = self.prepare()\n self.precision, self.recall = self.pr_curve()\n\n def prepare(self):\n image_ids, bboxes, prob = [], [], []\n annotations = {}\n # while img_batch:\n for i in tqdm(range(self.data.num_batch), desc='batch forward'):\n # print(\"{:5}th batch\".format(i))\n img_batch, bbox_batch = self.data.get_batch()\n results = self.detector.detect_batch(img_batch)\n for ii in range(len(results)):\n boxes_filtered, probs_filtered = results[ii]\n # bbox_gt = bbox_batch[ii]['bbox_det']['bboxes']\n # filter_mat_probs = np.array(probs_filtered >= cfg.THRESHOLD, dtype='bool')\n # filter_mat_probs = np.nonzero(filter_mat_probs)\n # boxes_ft_prob = boxes_filtered[filter_mat_probs]\n # probs_ft_prob = probs_filtered[filter_mat_probs]\n # image = img_batch[ii]\n # draw_result(image, bbox_gt, (0, 0, 255))\n # draw_result(image, boxes_ft_prob, (255, 0, 0))\n # cv2.imshow('Image', image)\n # cv2.waitKey(0)\n image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))\n bboxes.extend(boxes_filtered)\n prob.extend(probs_filtered)\n if bbox_batch[ii]['id'] not in annotations:\n annotations[bbox_batch[ii]['id']] = copy.deepcopy(bbox_batch[ii]['bbox_det'])\n sorted_ind = np.argsort(prob)[::-1]\n 
sorted_prob = np.sort(prob)[::-1]\n BB = np.array(bboxes)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n return image_ids, BB, sorted_prob, annotations\n\n def pr_curve(self):\n nd = len(self.image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in tqdm(range(nd), desc='painting PR curve'):\n # for d in range(nd):\n R = self.annotations[self.image_ids[d]]\n bb = self.bboxes[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bboxes'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[2] / 2)\n iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[3] / 2)\n ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[2] / 2)\n iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[3] / 2)\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > cfg.IOU_THRESHOLD_GT:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n return prec, rec\n\n def eval(self, use_07_metric=False):\n \"\"\" ap = eval(rec, prec, [use_07_metric])\n Compute AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(self.recall >= t) == 0:\n p = 0\n else:\n p = np.max(self.precision[self.recall >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = 
np.concatenate(([0.], self.recall, [1.]))\n mpre = np.concatenate(([0.], self.precision, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n return ap\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-ims', '--image_size', default=512, type=int)\n parser.add_argument('-g','--gpu', type=str)\n parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')\n parser.add_argument('-ds', '--data_source', default='all', type=str, choices=['coco', 'pascal', 'all'])\n parser.add_argument('-ef', '--eval_file', type=str, required=True)\n parser.add_argument('-lf', '--log_file', type=str)\n parser.add_argument('-al', '--auto_all', action='store_true')\n # when calculate single model\n parser.add_argument('--weights', default=\"hg_yolo-240000\", type=str)\n parser.add_argument('--weight_dir', default='../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n if not args.auto_all:\n strings = get_config(args.weight_dir)\n\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(args.weight_dir, args.weights))\n # data = COCO_VAL()\n data = PASCAL_VAL()\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log = Logger(args.eval_file, level='debug')\n log.logger.info('\\n calculate single ap from {} {}\\n'.format(args.weight_dir, args.weights))\n log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(\n data.__class__.__name__, ap, args.weights, strings))\n else:\n data_source = ds_config(args)\n log = Logger(args.eval_file, level='debug')\n 
log.logger.info('\\n calculate ap from {}\\n'.format(args.eval_file))\n model_start = 'hg_yolo'\n rootdir = '../' + args.log_file\n root_list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件\n root_list.sort()\n for path in root_list:\n model_dir = os.path.join(rootdir, path)\n models = os.listdir(model_dir)\n models = filter(lambda x: x.startswith(model_start), models)\n models = list(set(map(lambda x: x.split('.')[0], models)))\n models.sort(key=lambda x: int(x[8:]))\n for data in data_source:\n for model in models:\n strings = get_config(model_dir)\n tf.reset_default_graph()\n net = HOURGLASSYOLONet('eval')\n detector = Detector(net, os.path.join(model_dir, model))\n evaluator = EVALUATOR(detector, data)\n ap = evaluator.eval()\n log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'.format(\n data.__class__.__name__, ap, model, strings))\n detector.sess.close()\n del net\n del detector\n del evaluator\n\n\nif __name__ == '__main__':\n main()\n # print(os.path.realpath('.'))\n # print(os.path.dirname(os.path.realpath('.')))\n # print(os.sep)\n #\n # print(os.path.dirname(os.path.realpath('.')).split(os.sep))\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import os
import zipfile
import cv2
import numpy as np
from sklearn import svm
from sklearn import cross_validation
from sklearn.externals import joblib
import matplotlib.pyplot as plt
""" Global constants """
data_zip = "data.zip" # The zip archive
clean_files = [".csv", ".jpg"] # File extensions to clean
data_file = "data.csv"
img_ext = ".jpg"
perf_file = "performance.txt"
def unzip_data():
    """ Unzip the data held in zip file """
    # Context manager guarantees the archive handle is closed even if
    # extraction raises; the original leaked the handle on error.
    with zipfile.ZipFile(data_zip, 'r') as zip_ref:
        zip_ref.extractall('')
def clean_data():
    """ Clean up all the unzipped data """
    # Delete every file in the working directory whose extension is
    # listed in clean_files.
    for extension in clean_files:
        for entry in os.listdir("."):
            if entry.endswith(extension):
                os.remove(entry)
def downscale_image(img, bottom, x, y):
    """
    Take bottom section of image
    Rescale
    Binarise around the mean intensity (the Canny edge detection this
    docstring previously advertised is disabled below)
    """
    # img.shape is (rows, cols); shape[1::-1] reverses to (width, height).
    width, height = tuple(img.shape[1::-1])
    # Keep only the bottom `bottom` fraction of rows, trimming one pixel
    # from every border.
    img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]
    img = cv2.resize(img, (x, y))
    # Alternative feature extractor, currently disabled:
    #img = cv2.Canny(img, 100, 200)
    # Threshold at the image's own mean to get a binary feature map.
    ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)
    return img
def main():
    """Train and evaluate an SVM that classifies image bottom strips.

    Pipeline: unzip the data archive, read labels from the CSV, build
    binary-thresholded downscaled features from each image, cross-validate
    an SVC, persist the final model, and print the decision-function
    value for every training sample.  (Python 2 source: note the print
    statements near the end.)
    """
    unzip_data()

    labels = []

    """ The labels """
    # Column 0 is the image file name (stored as an int); columns 1-6
    # are per-category flags.  Only column 'one' is used as the label.
    data = np.genfromtxt(
        data_file, # file name
        skip_header=0, # lines to skip at the top
        skip_footer=0, # lines to skip at the bottom
        delimiter=',', # column delimiter
        dtype='int', # data type
        filling_values=0, # fill missing values with 0
        usecols=(0, 1, 2, 3, 4, 5, 6), # columns to read
        names=[
            'filename',
            'one',
            'two',
            'three',
            'four',
            'five',
            'six'
        ] # column names
    )
    # Binary labels: 1 for positive samples, -1 for negative.
    for ones in data['one']:
        if ones:
            labels.append(1)
        else:
            labels.append(-1)

    """ The features """
    # Feature geometry: each image bottom is shrunk to x*y pixels, so a
    # feature vector has x*y binary entries.
    x = 5
    y = 12
    bottom = 0.4 # fraction of the image height kept, measured from the bottom
    features = []
    for name in data['filename']:
        """ Load the image """
        name_ext = str(name) + img_ext
        img = cv2.imread(name_ext, 0) # 0 = load as grayscale
        """ Take bottom section"""
        # Same crop as downscale_image(); duplicated inline here so the
        # intermediate images can be written out for inspection.
        width, height = tuple(img.shape[1::-1])
        img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]
        bottom_ext = str(name) + "_bottom_"+ img_ext
        cv2.imwrite(bottom_ext,img) # debug artifact: cropped bottom strip
        """ Scale down """
        img = cv2.resize(img, (x, y))
        # Binarise around the mean intensity.
        ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)
        scale_ext = str(name) + "_scale_"+ img_ext
        """ Scale back up only to save """
        cv2.imwrite(scale_ext,cv2.resize(img, (100*x, 100*y)))
        """ Add to list of training features """
        features.append(img.flatten())

    """ Train and validate the classifier """
    # NOTE(review): range(1, 2) runs exactly one validation round, so
    # 'mean' holds a single value -- confirm whether more loops were
    # intended.
    loops = 2
    acc = 0
    mean = []
    for i in range(1, loops):
        """ Split data for cross validation """
        features_train, features_test, labels_train, labels_test = \
            cross_validation.train_test_split(features, labels, test_size=0.2, random_state=10)

        """ Train """
        clf = svm.SVC(gamma=0.001)
        clf.fit(features_train, labels_train)

        """ Score """
        acc += clf.score(features_test, labels_test)
        mean.append(acc/i) # running mean accuracy over rounds so far

    """ Write performance to file to keep track """
    f = open(perf_file, 'w')
    f.write("Performance: " + str(mean[-1]))
    f.close()

    """ Train on all the data """
    clf = svm.SVC(gamma=0.001)
    clf.fit(features, labels)

    """ Save the classifier """
    joblib.dump(clf, "bottom.clf")

    """ Decision function """
    # Signed distance of each sample from the separating hyperplane.
    distances = clf.decision_function(features)

    """ False positives and negatives, look out for uncertainity """
    # For every sample: print index, distance, its confusion-matrix
    # category, and flag samples close to the decision boundary.
    for i in range(0,len(distances)):
        print i+1,distances[i],
        if labels[i] > 0:
            if distances[i] < 0:
                print "\t\tFALSE NEGATIVE",
            else:
                print "\t\tPOSITIVE",
        else:
            if distances[i] > 0:
                print "\t\tFALSE POSITIVE",
            else:
                print "\t\tNEGATIVE",
        # |distance| < 0.9 is treated as low confidence.
        if(abs(distances[i]) < 0.9):
            print "\t\tUNCERTAIN"
        else:
            print ""

    """ remove temp data """
    #clean_data()

    """ Ensure the mean has converged """
    #plt.plot(mean)
    #plt.show() # WILL STALL HERE
|
normal
|
{
"blob_id": "d2da95f44e814accd3a91c5e8497ceff85c98711",
"index": 2848,
"step-1": "import os\nimport zipfile\nimport cv2\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn import cross_validation\nfrom sklearn.externals import joblib\nimport matplotlib.pyplot as plt\n\n\n\"\"\" Global constants \"\"\"\ndata_zip = \"data.zip\" # The zip archive\nclean_files = [\".csv\", \".jpg\"] # File extensions to clean\ndata_file = \"data.csv\"\nimg_ext = \".jpg\"\nperf_file = \"performance.txt\"\n\n\ndef unzip_data():\n \"\"\" Unzip the data held in zip file \"\"\"\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()\n\n\ndef clean_data():\n \"\"\" Clean up all the unzipped data \"\"\"\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)\n\n\ndef downscale_image(img, bottom, x, y):\n \"\"\"\n Take bottom section of image\n Rescale\n Canny edge detection\n \"\"\"\n width, height = tuple(img.shape[1::-1])\n img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]\n img = cv2.resize(img, (x, y))\n #img = cv2.Canny(img, 100, 200)\n ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)\n return img\n\n\ndef main():\n unzip_data()\n\n labels = []\n\n \"\"\" The labels \"\"\"\n data = np.genfromtxt(\n data_file, # file name\n skip_header=0, # lines to skip at the top\n skip_footer=0, # lines to skip at the bottom\n delimiter=',', # column delimiter\n dtype='int', # data type\n filling_values=0, # fill missing values with 0\n usecols=(0, 1, 2, 3, 4, 5, 6), # columns to read\n names=[\n 'filename',\n 'one',\n 'two',\n 'three',\n 'four',\n 'five',\n 'six'\n ] # column names\n )\n for ones in data['one']:\n if ones:\n labels.append(1)\n else:\n labels.append(-1)\n\n \"\"\" The features \"\"\"\n x = 5\n y = 12\n bottom = 0.4\n features = []\n for name in data['filename']:\n \"\"\" Load the image \"\"\"\n name_ext = str(name) + img_ext\n img = cv2.imread(name_ext, 0)\n \"\"\" Take bottom 
section\"\"\"\n width, height = tuple(img.shape[1::-1])\n img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]\n bottom_ext = str(name) + \"_bottom_\"+ img_ext\n cv2.imwrite(bottom_ext,img)\n \"\"\" Scale down \"\"\"\n img = cv2.resize(img, (x, y))\n ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)\n scale_ext = str(name) + \"_scale_\"+ img_ext\n \"\"\" Scale back up only to save \"\"\"\n cv2.imwrite(scale_ext,cv2.resize(img, (100*x, 100*y)))\n \"\"\" Add to list of training features \"\"\"\n features.append(img.flatten())\n\n \"\"\" Train and validate the classifier \"\"\"\n loops = 2\n acc = 0\n mean = []\n for i in range(1, loops):\n \"\"\" Split data for cross validation \"\"\"\n features_train, features_test, labels_train, labels_test = \\\n cross_validation.train_test_split(features, labels, test_size=0.2, random_state=10)\n\n \"\"\" Train \"\"\"\n clf = svm.SVC(gamma=0.001)\n clf.fit(features_train, labels_train)\n\n \"\"\" Score \"\"\"\n acc += clf.score(features_test, labels_test)\n mean.append(acc/i)\n\n \"\"\" Write performance to file to keep track \"\"\"\n f = open(perf_file, 'w')\n f.write(\"Performance: \" + str(mean[-1]))\n f.close()\n\n \"\"\" Train on all the data \"\"\"\n clf = svm.SVC(gamma=0.001)\n clf.fit(features, labels)\n\n \"\"\" Save the classifier \"\"\"\n joblib.dump(clf, \"bottom.clf\")\n\n \"\"\" Decision function \"\"\"\n distances = clf.decision_function(features)\n\n \"\"\" False positives and negatives, look out for uncertainity \"\"\"\n for i in range(0,len(distances)):\n print i+1,distances[i],\n if labels[i] > 0:\n if distances[i] < 0:\n print \"\\t\\tFALSE NEGATIVE\",\n else:\n print \"\\t\\tPOSITIVE\",\n else:\n if distances[i] > 0:\n print \"\\t\\tFALSE POSITIVE\",\n else:\n print \"\\t\\tNEGATIVE\",\n if(abs(distances[i]) < 0.9):\n print \"\\t\\tUNCERTAIN\"\n else:\n print \"\"\n\n \"\"\" remove temp data \"\"\"\n #clean_data()\n\n \"\"\" Ensure the mean has converged \"\"\"\n 
#plt.plot(mean)\n #plt.show() # WILL STALL HERE\n\nif __name__ == \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python3
import os
import sys
import subprocess
# Compile the C++ source named on the command line into ./bin, then run it.
path = sys.argv[1]
name, ext = os.path.splitext(path)
# Robustness fix: make sure the output directory exists so g++ does not
# fail on the -o target.
os.makedirs('bin', exist_ok=True)
options = ['g++',
           '-O3',
           'src/' + path,
           '-o', f'./bin/{name}',
           '-std=c++11',
           '-lgmp']
# subprocess.run is the modern replacement for subprocess.call; the
# return value is ignored, matching the original behaviour.
subprocess.run(options)
subprocess.run([f'./bin/{name}'])
|
normal
|
{
"blob_id": "5dd79f8ebd74099871d4367cafd83359c4f24e26",
"index": 5385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-3": "<mask token>\npath = sys.argv[1]\nname, ext = os.path.splitext(path)\noptions = ['g++', '-O3', 'src/' + path, '-o', f'./bin/{name}', '-std=c++11',\n '-lgmp']\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-4": "import os\nimport sys\nimport subprocess\npath = sys.argv[1]\nname, ext = os.path.splitext(path)\noptions = ['g++', '-O3', 'src/' + path, '-o', f'./bin/{name}', '-std=c++11',\n '-lgmp']\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-5": "#!/usr/bin/python3\n\nimport os\nimport sys\nimport subprocess\n\npath = sys.argv[1]\nname, ext = os.path.splitext(path)\noptions = ['g++',\n '-O3',\n 'src/' + path,\n '-o', f'./bin/{name}',\n '-std=c++11',\n '-lgmp']\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# ------------------------------------------------------------
# calclex.py
#
# tokenizer for a simple expression evaluator for
# numbers and +,-,*,/
# ------------------------------------------------------------
import ply.lex as lex
# Regular expression rules for simple tokens.
# PLY sorts these string rules by decreasing regex length, so the
# two-character tokens (==, <>) match before their one-character
# prefixes (=, <).
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LKEY = r'\{'
t_RKEY = r'\}'
t_LBRACKET= r'\['
t_RBRACKET= r'\]'
t_TERMINAL= r';'
t_COMMA = r','
# NOTE(review): t_GREATHAN matches '<' and t_LESSTHAN matches '>' --
# the names look swapped; confirm intent before relying on them.
t_GREATHAN= r'\<'
t_LESSTHAN= r'\>'
t_DOT = r'\.'
t_TWODOTS = r':'
t_DIFERENT= r'\<\>'
t_EQUAL = r'='
t_TWOEQUAL= r'=='
# All the reserved words, mapping source keyword -> token type.
reserved = {
    'if' : 'IF',
    'then' : 'THEN',
    'else' : 'ELSE',
    'while' : 'WHILE',
    'int' : 'INT',
    'float' : 'FLOAT',
    'bool' : 'BOOL',
    'double' : 'DOUBLE',
    'char' : 'CHAR',
    'public' : 'PUBLIC',
    'private' : 'PRIVATE',
    'loop' : 'LOOP',
    'function' : 'FUNCTION',
    'main' : 'MAIN',
    'var' : 'VARS',
    'print' : 'PRINT'
}
# List of token names. This is always required by PLY.
tokens = ['NUMBER','PLUS','MINUS','TIMES','DIVIDE','LPAREN','RPAREN','LKEY','RKEY','LBRACKET','RBRACKET','TERMINAL','ID','COMMA','GREATHAN','LESSTHAN','DOT','TWODOTS','DIFERENT','EQUAL','TWOEQUAL'] + list(reserved.values())
# Identifier: a letter or underscore followed by letters, digits or
# underscores.  PLY uses the docstring below as the token's regex.
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    t.type = reserved.get(t.value,'ID') # Check for reserved words
    return t
# Discard comments introduced by '#'.
def t_COMMENT(t):
    r'\#.*'
    pass
    # No return value. Token discarded
def t_FLOAT(t):
    r'\d+\.\d+'
    # Defined before t_NUMBER so "12.34" lexes as one FLOAT rather than
    # NUMBER '.' NUMBER (PLY tries function rules in source order).
    t.value=float(t.value)
    return t
# A regular expression rule with some action code: integer literals.
def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t
# Define a rule so we can track line numbers; one rule consumes a run
# of newlines and bumps the lexer's line counter by that many.
def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)
# Compute column.
# input is the input text string
# token is a token instance
def find_column(input, token):
    """Return the 1-based column of *token* within *input*.

    Fixes an off-by-one bug: the old code kept the newline's own index
    as the line start, so the first character after any newline was
    reported as column 2 instead of column 1.
    """
    # Index of the first character of the token's line: one past the
    # preceding newline.  str.rfind returns -1 when the token is on the
    # first line, so +1 correctly yields 0.
    line_start = input.rfind('\n', 0, token.lexpos) + 1
    return (token.lexpos - line_start) + 1
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'

# Error handling rule: report the offending character and skip it.
def t_error(t):
    # NOTE(review): find_column is handed t.value[0] (a single char)
    # rather than the full input text, so the reported column is always
    # computed against a one-character string -- confirm and pass the
    # original source string instead.
    column = find_column(t.value[0],t)
    print "Illegal character"+t.value[0] +" in column '%d' and on line " %column
    t.lexer.skip(1)
# Build the lexer from the rules defined above in this module.
lexer = lex.lex()


# Test it out: a small C-like snippet exercising keywords, identifiers,
# numbers (including a float) and punctuation.
data = '''
int main () {

int a= 123.1234;

int b =123412341;

}
'''

# Give the lexer some input
lexer.input(data)

# Tokenize and dump every token until the input is exhausted
# (Python 2 print statement).
while True:
    tok = lexer.token()
    if not tok: break # No more input
    print tok
|
normal
|
{
"blob_id": "1530f1711be6313b07df680721daf4cb0a84edc0",
"index": 5502,
"step-1": "# ------------------------------------------------------------\n# calclex.py\n#\n# tokenizer for a simple expression evaluator for\n# numbers and +,-,*,/\n# ------------------------------------------------------------\nimport ply.lex as lex\n\n\n\n\n\n\n# Regular expression rules for simple tokens\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_LKEY = r'\\{' \nt_RKEY = r'\\}'\nt_LBRACKET= r'\\['\nt_RBRACKET= r'\\]' \nt_TERMINAL= r';' \nt_COMMA = r','\nt_GREATHAN= r'\\<'\nt_LESSTHAN= r'\\>' \nt_DOT = r'\\.'\nt_TWODOTS = r':' \nt_DIFERENT= r'\\<\\>' \nt_EQUAL = r'='\nt_TWOEQUAL= r'=='\n\n \n\n#all the reserved words\nreserved = {\n 'if' : 'IF',\n 'then' : 'THEN',\n 'else' : 'ELSE',\n 'while' : 'WHILE',\n 'int' : 'INT',\n 'float' : 'FLOAT',\n 'bool' : 'BOOL',\n 'double' : 'DOUBLE',\n 'char' : 'CHAR',\n 'public' : 'PUBLIC',\n 'private' : 'PRIVATE',\n 'loop' : 'LOOP',\n 'function' : 'FUNCTION',\n 'main' : 'MAIN', \n 'var' : 'VARS', \n 'print' : 'PRINT'\n}\n\n\n# List of token names. This is always required\n\ntokens = ['NUMBER','PLUS','MINUS','TIMES','DIVIDE','LPAREN','RPAREN','LKEY','RKEY','LBRACKET','RBRACKET','TERMINAL','ID','COMMA','GREATHAN','LESSTHAN','DOT','TWODOTS','DIFERENT','EQUAL','TWOEQUAL'] + list(reserved.values())\n\n\n#s reqgular exprsion that takes the fisrts leter then another letter or a number \ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value,'ID') # Check for reserved words\n return t\n\n\n#get al the comments that are with #\ndef t_COMMENT(t):\n r'\\#.*'\n pass\n # No return value. Token discarded\n\ndef t_FLOAT(t):\n r'\\d+\\.\\d+'\n t.value=float(t.value)\n return t\n\n# A regular expression rule with some action code\ndef t_NUMBER(t):\n r'\\d+'\n t.value = int(t.value) \n return t\n\n# Define a rule so we can track line numbers\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n# Compute column. 
\n# input is the input text string\n# token is a token instance\ndef find_column(input,token):\n last_cr = input.rfind('\\n',0,token.lexpos)\n if last_cr < 0:\n last_cr = 0\n column = (token.lexpos - last_cr) + 1\n return column\n\n\n# A string containing ignored characters (spaces and tabs)\nt_ignore = ' \\t'\n\n# Error handling rule\ndef t_error(t): \n column = find_column(t.value[0],t)\n print \"Illegal character\"+t.value[0] +\" in column '%d' and on line \" %column \n t.lexer.skip(1) \n\n\n\n# Build the lexer\nlexer = lex.lex()\n\n\n# Test it out\ndata = '''\nint main () {\n\nint a= 123.1234;\n\nint b =123412341;\n\n}\n'''\n\n# Give the lexer some input\nlexer.input(data)\n\n# Tokenize\nwhile True:\n tok = lexer.token()\n if not tok: break # No more input\n print tok\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def digital_sum(n):
    """Return the sum of the decimal digits of n (n itself if n < 10)."""
    total = 0
    while n >= 10:
        n, digit = divmod(n, 10)
        total += digit
    return total + n
def digital_root(n):
    """Return the digital root of n: repeatedly sum the decimal digits
    until a single digit remains (n itself when n < 10).

    Improvement: uses the closed-form congruence 1 + (n - 1) % 9 for
    n >= 10 instead of the original mutually-recursive digit summing --
    O(1) time, identical results, and no dependency on digital_sum.
    """
    if n < 10:
        return n
    return 1 + (n - 1) % 9
|
normal
|
{
"blob_id": "e3e6f1b6580a223558791cebfcb1a92d45553162",
"index": 1823,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef digital_root(n):\n if n < 10:\n return n\n return digital_root(digital_sum(n))\n",
"step-3": "def digital_sum(n):\n if n < 10:\n return n\n return n % 10 + digital_sum(n // 10)\n\n\ndef digital_root(n):\n if n < 10:\n return n\n return digital_root(digital_sum(n))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Project Euler - Problem XX
...
"""
# Imports
import time
# Global variables
# Lamda functions
# Functions
# Main functions
def main():
    """Solve the problem and print the result."""
    message = 'Output'
    print(message)
# Execute code, timing the whole run with wall-clock timestamps.
start = time.time()
if __name__ == "__main__":
    main()
end = time.time()
print('Run time: {}'.format(end - start))
|
normal
|
{
"blob_id": "cdb07241e08f8ac85a427c5b2bc3effca3917c85",
"index": 2188,
"step-1": "<mask token>\n\n\ndef main():\n print('Output')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('Output')\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n<mask token>\nprint('Run time: {}'.format(end - start))\n",
"step-3": "<mask token>\n\n\ndef main():\n print('Output')\n\n\nstart = time.time()\nif __name__ == '__main__':\n main()\nend = time.time()\nprint('Run time: {}'.format(end - start))\n",
"step-4": "<mask token>\nimport time\n\n\ndef main():\n print('Output')\n\n\nstart = time.time()\nif __name__ == '__main__':\n main()\nend = time.time()\nprint('Run time: {}'.format(end - start))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler - Problem XX\n...\n\"\"\"\n\n# Imports\nimport time\n\n# Global variables\n\n# Lamda functions\n\n# Functions\n\n# Main functions\ndef main():\n print('Output') \n\n# Execute code\nstart = time.time()\nif __name__ == \"__main__\":\n main()\nend = time.time()\nprint('Run time: {}'.format(end - start)) \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from bacalhau.tei_document import TEIDocument
import nltk
import unittest
class TestDocument(unittest.TestCase):
    """Exercises TEIDocument against the sample corpus file."""

    def setUp(self):
        # Build the document under test from the dummy TEI corpus file.
        tokenizer = nltk.tokenize.regexp.WordPunctTokenizer()
        stopwords = nltk.corpus.stopwords.words('english')
        xpath = '//tei:body/tei:div[@type = "dummy"]'
        self.filepath = 'tests/corpus/a.xml'
        self.doc = TEIDocument(self.filepath, tokenizer, stopwords, xpath)

    def test_get_text_count(self):
        self.assertEqual(2, self.doc.get_text_count())

    def test_get_texts(self):
        self.assertEqual(2, len(self.doc.get_texts()))

    def test_get_term_data(self):
        self.assertIsNotNone(self.doc.get_term_data())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
normal
|
{
"blob_id": "f86d01c4b980ac44dcdb1b0008493e1dbda25971",
"index": 4544,
"step-1": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from bacalhau.tei_document import TEIDocument\nimport nltk\nimport unittest\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
import sys
# Counting sort: values are known to lie in [0, 10000], so tally each
# value and emit it `count` times in order.
sys.stdin = open('10989.txt', 'r')
counting_list = [0] * 10001
N = int(sys.stdin.readline())
for _ in range(N):
    counting_list[int(sys.stdin.readline())] += 1
for value, count in enumerate(counting_list):
    if count:
        sys.stdout.write((str(value) + '\n') * count)
|
normal
|
{
"blob_id": "efca954e1977a6f6ac9a966b3c84ba80f5b7a663",
"index": 690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-3": "<mask token>\nsys.stdin = open('10989.txt', 'r')\ncounting_list = [(0) for _ in range(10001)]\nN = int(sys.stdin.readline())\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-4": "import sys\nsys.stdin = open('10989.txt', 'r')\ncounting_list = [(0) for _ in range(10001)]\nN = int(sys.stdin.readline())\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-5": "import sys\nsys.stdin = open('10989.txt', 'r')\n\ncounting_list = [0 for _ in range(10001)]\nN = int(sys.stdin.readline())\nfor n in range(N):\n counting_list[int(sys.stdin.readline())] += 1\n\nfor i, v in enumerate(counting_list):\n if v:\n sys.stdout.write((str(i) + '\\n') * v)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#classes that store values related to levels
from mg_cus_struct import *
from mg_movement import *
import copy
class BulletTemplate(object):
    """Blueprint for one bullet type: animation name, launch velocity,
    hitbox, and a schedule of movement commands keyed by cycle."""

    def __init__(self, animationName, initialVelocity, hitbox):
        self._spawningCycle = 0
        self._animationName = animationName
        self._initialVelocity = initialVelocity
        self._hitbox = hitbox
        self._movementList = {}

    def addMovementCommand(self, cycle, movementCommand):
        """Schedule movementCommand to apply on the given cycle."""
        self._movementList[cycle] = movementCommand
class BulletSpawnerTemplate(object):
    """Blueprint for a bullet spawner: exit angles, timing controls
    (initial delay, spray pattern, gaps between rounds, round count),
    motion schedule, bullet template, and an optional mask."""

    def __init__(self, initialPosition, initialVelocity):
        self._spawningCycle = 0
        self._initialPosition = initialPosition
        self._initialVelocity = initialVelocity
        self._movementList = {}
        self._displacement = 0
        self._exitLocations = []
        self._rotationSpeed = 0
        # Timing controls.
        self._initialDelay = 0
        self._sprayTimer = []
        self._inBetweenTimer = 0
        # NOTE(review): -1 presumably means "no round limit" -- confirm.
        self._rounds = -1
        self._bulletTemplate = None
        # Mask settings.
        self._maskName = ""
        self._maskLayer = 0

    def addSprayTimer(self, sprayTimer):
        """Append the given spray-timer cycles to the pattern."""
        self._sprayTimer.extend(sprayTimer)

    def setRounds(self, rounds):
        self._rounds = rounds

    def setInitialDelay(self, initialDelay):
        self._initialDelay = initialDelay

    def setInBetweenTimer(self, delay):
        self._inBetweenTimer = delay

    def addExitLocation(self, location):
        self._exitLocations.append(location)

    def addBulletTemplate(self, bulletTemplate):
        self._bulletTemplate = bulletTemplate

    def addMovementCommand(self, cycle, movementCommand):
        """Schedule movementCommand to apply on the given cycle."""
        self._movementList[cycle] = movementCommand

    def addMask(self, maskName, maskLayer):
        """Attach a named mask rendered on the given layer."""
        self._maskName = maskName
        self._maskLayer = maskLayer
class BulletMasterTemplate(object):
    """Named collection of spawner templates plus a table of power-up
    counts associated with this master."""

    def __init__(self, name):
        self._name = name
        self._bulletSpawnerTemplates = []
        # Power-up counts, all zero by default.
        self._powerUpTable = {
            "life": 0,
            "power": 0,
            "spell": 0,
            "points": 0,
        }

    def addBulletSpawnerTemplates(self, bulletSpawnerTemplate):
        """Register one spawner template with this master."""
        self._bulletSpawnerTemplates.append(bulletSpawnerTemplate)
class Bullet(MovementCommander) :
    """A live bullet: instantiates a BulletTemplate at a concrete
    position and exit angle and precomputes its flight path."""

    def __init__(self, bulletTemplate, position, exitAngle, master, spawningCycle) :
        # Deep-copy the template's velocity so rotating it by the exit
        # angle does not mutate the shared template.
        temp = copy.deepcopy(bulletTemplate._initialVelocity)
        temp._angle = temp._angle + exitAngle
        super().__init__(position, temp, spawningCycle)
        self.addStartingParameters(position, temp)
        self._animationName = bulletTemplate._animationName
        # Replay the template's scheduled movement commands onto this
        # instance.
        for i in bulletTemplate._movementList :
            self.addMovementCommandDirect(i, bulletTemplate._movementList[i])
        # NOTE(review): the rectangle looks like an off-screen culling
        # bound with a 100px margin around the playfield -- confirm
        # against MovementCommander.calculatePositions.
        self.calculatePositions(master, master._playerPosition, [-100, -100, 1620, 1180], None)
class BulletSpawner(MovementCommander) :
    """Moving emitter that fires Bullets along fixed exit angles on a timed
    spray schedule.

    The spawner's own path is precomputed on construction; calculateBullets()
    then replays that path cycle-by-cycle through a small state machine
    (initial delay -> spray -> between-round pause -> done) to decide when
    and where bullets spawn.
    """

    def __init__(self, bulletSpawnerTemplate, masterPosition, master, enemy, spawningCycle) :
        self._internalCounter = 0
        self._master = master

        # Emission geometry copied from the template.
        self._displacement = bulletSpawnerTemplate._displacement
        self._exitLocations = list(bulletSpawnerTemplate._exitLocations)
        self._rotationSpeed = bulletSpawnerTemplate._rotationSpeed
        self._bulletTemplate = bulletSpawnerTemplate._bulletTemplate

        # Lifetime is tied to the owning enemy.
        self._spawningCycle = enemy._spawningCycle
        self._seenCycle = enemy._spawningCycle
        self._deathCycle = enemy._deathCycle

        # NOTE(review): the spray-timer list is shared with the template,
        # not copied — mutating it here would leak back; confirm intended.
        self._sprayTimer = bulletSpawnerTemplate._sprayTimer
        self._initialDelay = bulletSpawnerTemplate._initialDelay

        # Length of one spray round is the largest timer offset; an empty
        # timer list means a zero-length round.
        try :
            self._lengthOfSpray = max(self._sprayTimer)
        except ValueError:
            self._lengthOfSpray = 0

        self._inBetweenTimer = bulletSpawnerTemplate._inBetweenTimer
        self._rounds = bulletSpawnerTemplate._rounds

        super().__init__(bulletSpawnerTemplate._initialPosition, bulletSpawnerTemplate._initialVelocity, spawningCycle)

        self.calculatePositions(master, master._playerPosition, None, masterPosition)

        # Apply mask settings.
        self._maskName = bulletSpawnerTemplate._maskName
        self._maskLayer = bulletSpawnerTemplate._maskLayer

    def calculateBullets(self) :
        """Replay the precomputed position list and return every Bullet this
        spawner emits over its lifetime.

        Side effects: advances ``_internalCounter``, and updates
        ``_seenCycle`` (first spray cycle) and ``_deathCycle`` (cycle the
        round limit is exhausted).
        """
        returnList = []
        mode = "initialDelayMode"
        switchCounter = -1
        currentRound = 0
        for i in self._positionList :
            self._internalCounter = self._internalCounter + 1
            switchCounter = switchCounter + 1
            if mode == "initialDelayMode" :
                # Wait out the initial delay before the first spray.
                if switchCounter >= self._initialDelay :
                    mode = "sprayMode"
                    switchCounter = -1
                    self._seenCycle = self._spawningCycle + self._internalCounter
            elif mode == "sprayMode" :
                # Emit one bullet per exit angle on each scheduled offset.
                if switchCounter in self._sprayTimer :
                    for j in self._exitLocations :
                        # Offset the spawn point by _displacement along the
                        # exit angle, relative to the spawner's position.
                        offset = CUS_Polar(self._displacement, j)
                        pos = CUS_Point(0.0, 0.0)
                        pos.add(toPoint(offset))
                        pos._x = pos._x + i._x
                        pos._y = pos._y + i._y
                        bullet = Bullet(self._bulletTemplate, pos, j, self._master, self._spawningCycle+self._internalCounter)
                        returnList.append(bullet)
                if switchCounter >= self._lengthOfSpray :
                    mode = "inBetweenTimerMode"
                    currentRound = currentRound + 1
                    switchCounter = -1
            elif mode == "inBetweenTimerMode" :
                if switchCounter >= self._inBetweenTimer :
                    mode = "sprayMode"
                    switchCounter = -1
                # BUGFIX: was `self._rounds is not -1` — identity comparison
                # with an int literal only works via CPython small-int
                # caching; use value equality.  -1 still means "unlimited".
                if currentRound >= self._rounds and self._rounds != -1 :
                    mode = "sprayOverMode"
                    self._deathCycle = self._spawningCycle + self._internalCounter

        return returnList
class BulletMaster(object) :
    """Owns one BulletSpawner per template in the master template and
    aggregates the bullets they all produce.
    """

    def __init__(self, bulletMasterTemplate, masterPositionList, master, enemy, spawningCycle) :
        self._name = bulletMasterTemplate._name
        self._bulletSpawners = [
            BulletSpawner(template, masterPositionList, master, enemy, spawningCycle)
            for template in bulletMasterTemplate._bulletSpawnerTemplates
        ]

    def calculateBullets(self) :
        """Collect the bullets emitted by every owned spawner."""
        bullets = []
        for spawner in self._bulletSpawners :
            bullets.extend(spawner.calculateBullets())
        return bullets
|
normal
|
{
"blob_id": "519746450826d02230a492a99e0b518602d53fcb",
"index": 9932,
"step-1": "<mask token>\n\n\nclass BulletSpawnerTemplate(object):\n <mask token>\n <mask token>\n\n def setRounds(self, rounds):\n self._rounds = rounds\n <mask token>\n\n def setInBetweenTimer(self, delay):\n self._inBetweenTimer = delay\n <mask token>\n\n def addBulletTemplate(self, bulletTemplate):\n self._bulletTemplate = bulletTemplate\n\n def addMovementCommand(self, cycle, movementCommand):\n self._movementList[cycle] = movementCommand\n <mask token>\n\n\nclass BulletMasterTemplate(object):\n\n def __init__(self, name):\n self._name = name\n self._bulletSpawnerTemplates = []\n self._powerUpTable = {'life': 0, 'power': 0, 'spell': 0, 'points': 0}\n\n def addBulletSpawnerTemplates(self, bulletSpawnerTemplate):\n self._bulletSpawnerTemplates.append(bulletSpawnerTemplate)\n\n\nclass Bullet(MovementCommander):\n\n def __init__(self, bulletTemplate, position, exitAngle, master,\n spawningCycle):\n temp = copy.deepcopy(bulletTemplate._initialVelocity)\n temp._angle = temp._angle + exitAngle\n super().__init__(position, temp, spawningCycle)\n self.addStartingParameters(position, temp)\n self._animationName = bulletTemplate._animationName\n for i in bulletTemplate._movementList:\n self.addMovementCommandDirect(i, bulletTemplate._movementList[i])\n self.calculatePositions(master, master._playerPosition, [-100, -100,\n 1620, 1180], None)\n\n\nclass BulletSpawner(MovementCommander):\n\n def __init__(self, bulletSpawnerTemplate, masterPosition, master, enemy,\n spawningCycle):\n self._internalCounter = 0\n self._exitLocations = []\n self._displacement = 0.0\n self._master = master\n self._displacement = bulletSpawnerTemplate._displacement\n for i in bulletSpawnerTemplate._exitLocations:\n self._exitLocations.append(i)\n self._rotationSpeed = bulletSpawnerTemplate._rotationSpeed\n self._bulletTemplate = bulletSpawnerTemplate._bulletTemplate\n self._spawningCycle = enemy._spawningCycle\n self._seenCycle = enemy._spawningCycle\n self._deathCycle = enemy._deathCycle\n 
self._sprayTimer = bulletSpawnerTemplate._sprayTimer\n self._initialDelay = bulletSpawnerTemplate._initialDelay\n try:\n self._lengthOfSpray = max(self._sprayTimer)\n except ValueError:\n self._lengthOfSpray = 0\n self._inBetweenTimer = bulletSpawnerTemplate._inBetweenTimer\n self._rounds = bulletSpawnerTemplate._rounds\n super().__init__(bulletSpawnerTemplate._initialPosition,\n bulletSpawnerTemplate._initialVelocity, spawningCycle)\n self.calculatePositions(master, master._playerPosition, None,\n masterPosition)\n self._maskName = bulletSpawnerTemplate._maskName\n self._maskLayer = bulletSpawnerTemplate._maskLayer\n\n def calculateBullets(self):\n returnList = []\n mode = 'initialDelayMode'\n switchCounter = -1\n currentRound = 0\n for i in self._positionList:\n self._internalCounter = self._internalCounter + 1\n switchCounter = switchCounter + 1\n if mode == 'initialDelayMode':\n if switchCounter >= self._initialDelay:\n mode = 'sprayMode'\n switchCounter = -1\n self._seenCycle = (self._spawningCycle + self.\n _internalCounter)\n elif mode == 'sprayMode':\n if switchCounter in self._sprayTimer:\n for j in self._exitLocations:\n offset = CUS_Polar(self._displacement, j)\n pos = CUS_Point(0.0, 0.0)\n pos.add(toPoint(offset))\n pos._x = pos._x + i._x\n pos._y = pos._y + i._y\n bullet = Bullet(self._bulletTemplate, pos, j, self.\n _master, self._spawningCycle + self.\n _internalCounter)\n returnList.append(bullet)\n if switchCounter >= self._lengthOfSpray:\n mode = 'inBetweenTimerMode'\n currentRound = currentRound + 1\n switchCounter = -1\n elif mode == 'inBetweenTimerMode':\n if switchCounter >= self._inBetweenTimer:\n mode = 'sprayMode'\n switchCounter = -1\n if currentRound >= self._rounds and self._rounds is not -1:\n mode = 'sprayOverMode'\n self._deathCycle = (self._spawningCycle + self.\n _internalCounter)\n return returnList\n\n\nclass BulletMaster(object):\n\n def __init__(self, bulletMasterTemplate, masterPositionList, master,\n enemy, spawningCycle):\n 
self._name = bulletMasterTemplate._name\n self._bulletSpawners = []\n for i in bulletMasterTemplate._bulletSpawnerTemplates:\n self._bulletSpawners.append(BulletSpawner(i, masterPositionList,\n master, enemy, spawningCycle))\n\n def calculateBullets(self):\n returnList = []\n for i in self._bulletSpawners:\n returnList.extend(i.calculateBullets())\n return returnList\n",
"step-2": "<mask token>\n\n\nclass BulletSpawnerTemplate(object):\n\n def __init__(self, initialPosition, initialVelocity):\n self._spawningCycle = 0\n self._initialPosition = initialPosition\n self._initialVelocity = initialVelocity\n self._movementList = dict()\n self._displacement = 0\n self._exitLocations = []\n self._rotationSpeed = 0\n self._initialDelay = 0\n self._sprayTimer = []\n self._inBetweenTimer = 0\n self._rounds = -1\n self._bulletTemplate = None\n self._maskName = ''\n self._maskLayer = 0\n <mask token>\n\n def setRounds(self, rounds):\n self._rounds = rounds\n\n def setInitialDelay(self, initialDelay):\n self._initialDelay = initialDelay\n\n def setInBetweenTimer(self, delay):\n self._inBetweenTimer = delay\n <mask token>\n\n def addBulletTemplate(self, bulletTemplate):\n self._bulletTemplate = bulletTemplate\n\n def addMovementCommand(self, cycle, movementCommand):\n self._movementList[cycle] = movementCommand\n\n def addMask(self, maskName, maskLayer):\n self._maskName = maskName\n self._maskLayer = maskLayer\n\n\nclass BulletMasterTemplate(object):\n\n def __init__(self, name):\n self._name = name\n self._bulletSpawnerTemplates = []\n self._powerUpTable = {'life': 0, 'power': 0, 'spell': 0, 'points': 0}\n\n def addBulletSpawnerTemplates(self, bulletSpawnerTemplate):\n self._bulletSpawnerTemplates.append(bulletSpawnerTemplate)\n\n\nclass Bullet(MovementCommander):\n\n def __init__(self, bulletTemplate, position, exitAngle, master,\n spawningCycle):\n temp = copy.deepcopy(bulletTemplate._initialVelocity)\n temp._angle = temp._angle + exitAngle\n super().__init__(position, temp, spawningCycle)\n self.addStartingParameters(position, temp)\n self._animationName = bulletTemplate._animationName\n for i in bulletTemplate._movementList:\n self.addMovementCommandDirect(i, bulletTemplate._movementList[i])\n self.calculatePositions(master, master._playerPosition, [-100, -100,\n 1620, 1180], None)\n\n\nclass BulletSpawner(MovementCommander):\n\n def 
__init__(self, bulletSpawnerTemplate, masterPosition, master, enemy,\n spawningCycle):\n self._internalCounter = 0\n self._exitLocations = []\n self._displacement = 0.0\n self._master = master\n self._displacement = bulletSpawnerTemplate._displacement\n for i in bulletSpawnerTemplate._exitLocations:\n self._exitLocations.append(i)\n self._rotationSpeed = bulletSpawnerTemplate._rotationSpeed\n self._bulletTemplate = bulletSpawnerTemplate._bulletTemplate\n self._spawningCycle = enemy._spawningCycle\n self._seenCycle = enemy._spawningCycle\n self._deathCycle = enemy._deathCycle\n self._sprayTimer = bulletSpawnerTemplate._sprayTimer\n self._initialDelay = bulletSpawnerTemplate._initialDelay\n try:\n self._lengthOfSpray = max(self._sprayTimer)\n except ValueError:\n self._lengthOfSpray = 0\n self._inBetweenTimer = bulletSpawnerTemplate._inBetweenTimer\n self._rounds = bulletSpawnerTemplate._rounds\n super().__init__(bulletSpawnerTemplate._initialPosition,\n bulletSpawnerTemplate._initialVelocity, spawningCycle)\n self.calculatePositions(master, master._playerPosition, None,\n masterPosition)\n self._maskName = bulletSpawnerTemplate._maskName\n self._maskLayer = bulletSpawnerTemplate._maskLayer\n\n def calculateBullets(self):\n returnList = []\n mode = 'initialDelayMode'\n switchCounter = -1\n currentRound = 0\n for i in self._positionList:\n self._internalCounter = self._internalCounter + 1\n switchCounter = switchCounter + 1\n if mode == 'initialDelayMode':\n if switchCounter >= self._initialDelay:\n mode = 'sprayMode'\n switchCounter = -1\n self._seenCycle = (self._spawningCycle + self.\n _internalCounter)\n elif mode == 'sprayMode':\n if switchCounter in self._sprayTimer:\n for j in self._exitLocations:\n offset = CUS_Polar(self._displacement, j)\n pos = CUS_Point(0.0, 0.0)\n pos.add(toPoint(offset))\n pos._x = pos._x + i._x\n pos._y = pos._y + i._y\n bullet = Bullet(self._bulletTemplate, pos, j, self.\n _master, self._spawningCycle + self.\n _internalCounter)\n 
returnList.append(bullet)\n if switchCounter >= self._lengthOfSpray:\n mode = 'inBetweenTimerMode'\n currentRound = currentRound + 1\n switchCounter = -1\n elif mode == 'inBetweenTimerMode':\n if switchCounter >= self._inBetweenTimer:\n mode = 'sprayMode'\n switchCounter = -1\n if currentRound >= self._rounds and self._rounds is not -1:\n mode = 'sprayOverMode'\n self._deathCycle = (self._spawningCycle + self.\n _internalCounter)\n return returnList\n\n\nclass BulletMaster(object):\n\n def __init__(self, bulletMasterTemplate, masterPositionList, master,\n enemy, spawningCycle):\n self._name = bulletMasterTemplate._name\n self._bulletSpawners = []\n for i in bulletMasterTemplate._bulletSpawnerTemplates:\n self._bulletSpawners.append(BulletSpawner(i, masterPositionList,\n master, enemy, spawningCycle))\n\n def calculateBullets(self):\n returnList = []\n for i in self._bulletSpawners:\n returnList.extend(i.calculateBullets())\n return returnList\n",
"step-3": "<mask token>\n\n\nclass BulletTemplate(object):\n <mask token>\n <mask token>\n\n\nclass BulletSpawnerTemplate(object):\n\n def __init__(self, initialPosition, initialVelocity):\n self._spawningCycle = 0\n self._initialPosition = initialPosition\n self._initialVelocity = initialVelocity\n self._movementList = dict()\n self._displacement = 0\n self._exitLocations = []\n self._rotationSpeed = 0\n self._initialDelay = 0\n self._sprayTimer = []\n self._inBetweenTimer = 0\n self._rounds = -1\n self._bulletTemplate = None\n self._maskName = ''\n self._maskLayer = 0\n\n def addSprayTimer(self, sprayTimer):\n self._sprayTimer.extend(sprayTimer)\n\n def setRounds(self, rounds):\n self._rounds = rounds\n\n def setInitialDelay(self, initialDelay):\n self._initialDelay = initialDelay\n\n def setInBetweenTimer(self, delay):\n self._inBetweenTimer = delay\n\n def addExitLocation(self, location):\n self._exitLocations.append(location)\n\n def addBulletTemplate(self, bulletTemplate):\n self._bulletTemplate = bulletTemplate\n\n def addMovementCommand(self, cycle, movementCommand):\n self._movementList[cycle] = movementCommand\n\n def addMask(self, maskName, maskLayer):\n self._maskName = maskName\n self._maskLayer = maskLayer\n\n\nclass BulletMasterTemplate(object):\n\n def __init__(self, name):\n self._name = name\n self._bulletSpawnerTemplates = []\n self._powerUpTable = {'life': 0, 'power': 0, 'spell': 0, 'points': 0}\n\n def addBulletSpawnerTemplates(self, bulletSpawnerTemplate):\n self._bulletSpawnerTemplates.append(bulletSpawnerTemplate)\n\n\nclass Bullet(MovementCommander):\n\n def __init__(self, bulletTemplate, position, exitAngle, master,\n spawningCycle):\n temp = copy.deepcopy(bulletTemplate._initialVelocity)\n temp._angle = temp._angle + exitAngle\n super().__init__(position, temp, spawningCycle)\n self.addStartingParameters(position, temp)\n self._animationName = bulletTemplate._animationName\n for i in bulletTemplate._movementList:\n 
self.addMovementCommandDirect(i, bulletTemplate._movementList[i])\n self.calculatePositions(master, master._playerPosition, [-100, -100,\n 1620, 1180], None)\n\n\nclass BulletSpawner(MovementCommander):\n\n def __init__(self, bulletSpawnerTemplate, masterPosition, master, enemy,\n spawningCycle):\n self._internalCounter = 0\n self._exitLocations = []\n self._displacement = 0.0\n self._master = master\n self._displacement = bulletSpawnerTemplate._displacement\n for i in bulletSpawnerTemplate._exitLocations:\n self._exitLocations.append(i)\n self._rotationSpeed = bulletSpawnerTemplate._rotationSpeed\n self._bulletTemplate = bulletSpawnerTemplate._bulletTemplate\n self._spawningCycle = enemy._spawningCycle\n self._seenCycle = enemy._spawningCycle\n self._deathCycle = enemy._deathCycle\n self._sprayTimer = bulletSpawnerTemplate._sprayTimer\n self._initialDelay = bulletSpawnerTemplate._initialDelay\n try:\n self._lengthOfSpray = max(self._sprayTimer)\n except ValueError:\n self._lengthOfSpray = 0\n self._inBetweenTimer = bulletSpawnerTemplate._inBetweenTimer\n self._rounds = bulletSpawnerTemplate._rounds\n super().__init__(bulletSpawnerTemplate._initialPosition,\n bulletSpawnerTemplate._initialVelocity, spawningCycle)\n self.calculatePositions(master, master._playerPosition, None,\n masterPosition)\n self._maskName = bulletSpawnerTemplate._maskName\n self._maskLayer = bulletSpawnerTemplate._maskLayer\n\n def calculateBullets(self):\n returnList = []\n mode = 'initialDelayMode'\n switchCounter = -1\n currentRound = 0\n for i in self._positionList:\n self._internalCounter = self._internalCounter + 1\n switchCounter = switchCounter + 1\n if mode == 'initialDelayMode':\n if switchCounter >= self._initialDelay:\n mode = 'sprayMode'\n switchCounter = -1\n self._seenCycle = (self._spawningCycle + self.\n _internalCounter)\n elif mode == 'sprayMode':\n if switchCounter in self._sprayTimer:\n for j in self._exitLocations:\n offset = CUS_Polar(self._displacement, j)\n pos = 
CUS_Point(0.0, 0.0)\n pos.add(toPoint(offset))\n pos._x = pos._x + i._x\n pos._y = pos._y + i._y\n bullet = Bullet(self._bulletTemplate, pos, j, self.\n _master, self._spawningCycle + self.\n _internalCounter)\n returnList.append(bullet)\n if switchCounter >= self._lengthOfSpray:\n mode = 'inBetweenTimerMode'\n currentRound = currentRound + 1\n switchCounter = -1\n elif mode == 'inBetweenTimerMode':\n if switchCounter >= self._inBetweenTimer:\n mode = 'sprayMode'\n switchCounter = -1\n if currentRound >= self._rounds and self._rounds is not -1:\n mode = 'sprayOverMode'\n self._deathCycle = (self._spawningCycle + self.\n _internalCounter)\n return returnList\n\n\nclass BulletMaster(object):\n\n def __init__(self, bulletMasterTemplate, masterPositionList, master,\n enemy, spawningCycle):\n self._name = bulletMasterTemplate._name\n self._bulletSpawners = []\n for i in bulletMasterTemplate._bulletSpawnerTemplates:\n self._bulletSpawners.append(BulletSpawner(i, masterPositionList,\n master, enemy, spawningCycle))\n\n def calculateBullets(self):\n returnList = []\n for i in self._bulletSpawners:\n returnList.extend(i.calculateBullets())\n return returnList\n",
"step-4": "from mg_cus_struct import *\nfrom mg_movement import *\nimport copy\n\n\nclass BulletTemplate(object):\n\n def __init__(self, animationName, initialVelocity, hitbox):\n self._spawningCycle = 0\n self._animationName = animationName\n self._initialVelocity = initialVelocity\n self._movementList = dict()\n self._hitbox = hitbox\n\n def addMovementCommand(self, cycle, movementCommand):\n self._movementList[cycle] = movementCommand\n\n\nclass BulletSpawnerTemplate(object):\n\n def __init__(self, initialPosition, initialVelocity):\n self._spawningCycle = 0\n self._initialPosition = initialPosition\n self._initialVelocity = initialVelocity\n self._movementList = dict()\n self._displacement = 0\n self._exitLocations = []\n self._rotationSpeed = 0\n self._initialDelay = 0\n self._sprayTimer = []\n self._inBetweenTimer = 0\n self._rounds = -1\n self._bulletTemplate = None\n self._maskName = ''\n self._maskLayer = 0\n\n def addSprayTimer(self, sprayTimer):\n self._sprayTimer.extend(sprayTimer)\n\n def setRounds(self, rounds):\n self._rounds = rounds\n\n def setInitialDelay(self, initialDelay):\n self._initialDelay = initialDelay\n\n def setInBetweenTimer(self, delay):\n self._inBetweenTimer = delay\n\n def addExitLocation(self, location):\n self._exitLocations.append(location)\n\n def addBulletTemplate(self, bulletTemplate):\n self._bulletTemplate = bulletTemplate\n\n def addMovementCommand(self, cycle, movementCommand):\n self._movementList[cycle] = movementCommand\n\n def addMask(self, maskName, maskLayer):\n self._maskName = maskName\n self._maskLayer = maskLayer\n\n\nclass BulletMasterTemplate(object):\n\n def __init__(self, name):\n self._name = name\n self._bulletSpawnerTemplates = []\n self._powerUpTable = {'life': 0, 'power': 0, 'spell': 0, 'points': 0}\n\n def addBulletSpawnerTemplates(self, bulletSpawnerTemplate):\n self._bulletSpawnerTemplates.append(bulletSpawnerTemplate)\n\n\nclass Bullet(MovementCommander):\n\n def __init__(self, bulletTemplate, 
position, exitAngle, master,\n spawningCycle):\n temp = copy.deepcopy(bulletTemplate._initialVelocity)\n temp._angle = temp._angle + exitAngle\n super().__init__(position, temp, spawningCycle)\n self.addStartingParameters(position, temp)\n self._animationName = bulletTemplate._animationName\n for i in bulletTemplate._movementList:\n self.addMovementCommandDirect(i, bulletTemplate._movementList[i])\n self.calculatePositions(master, master._playerPosition, [-100, -100,\n 1620, 1180], None)\n\n\nclass BulletSpawner(MovementCommander):\n\n def __init__(self, bulletSpawnerTemplate, masterPosition, master, enemy,\n spawningCycle):\n self._internalCounter = 0\n self._exitLocations = []\n self._displacement = 0.0\n self._master = master\n self._displacement = bulletSpawnerTemplate._displacement\n for i in bulletSpawnerTemplate._exitLocations:\n self._exitLocations.append(i)\n self._rotationSpeed = bulletSpawnerTemplate._rotationSpeed\n self._bulletTemplate = bulletSpawnerTemplate._bulletTemplate\n self._spawningCycle = enemy._spawningCycle\n self._seenCycle = enemy._spawningCycle\n self._deathCycle = enemy._deathCycle\n self._sprayTimer = bulletSpawnerTemplate._sprayTimer\n self._initialDelay = bulletSpawnerTemplate._initialDelay\n try:\n self._lengthOfSpray = max(self._sprayTimer)\n except ValueError:\n self._lengthOfSpray = 0\n self._inBetweenTimer = bulletSpawnerTemplate._inBetweenTimer\n self._rounds = bulletSpawnerTemplate._rounds\n super().__init__(bulletSpawnerTemplate._initialPosition,\n bulletSpawnerTemplate._initialVelocity, spawningCycle)\n self.calculatePositions(master, master._playerPosition, None,\n masterPosition)\n self._maskName = bulletSpawnerTemplate._maskName\n self._maskLayer = bulletSpawnerTemplate._maskLayer\n\n def calculateBullets(self):\n returnList = []\n mode = 'initialDelayMode'\n switchCounter = -1\n currentRound = 0\n for i in self._positionList:\n self._internalCounter = self._internalCounter + 1\n switchCounter = switchCounter + 1\n if 
mode == 'initialDelayMode':\n if switchCounter >= self._initialDelay:\n mode = 'sprayMode'\n switchCounter = -1\n self._seenCycle = (self._spawningCycle + self.\n _internalCounter)\n elif mode == 'sprayMode':\n if switchCounter in self._sprayTimer:\n for j in self._exitLocations:\n offset = CUS_Polar(self._displacement, j)\n pos = CUS_Point(0.0, 0.0)\n pos.add(toPoint(offset))\n pos._x = pos._x + i._x\n pos._y = pos._y + i._y\n bullet = Bullet(self._bulletTemplate, pos, j, self.\n _master, self._spawningCycle + self.\n _internalCounter)\n returnList.append(bullet)\n if switchCounter >= self._lengthOfSpray:\n mode = 'inBetweenTimerMode'\n currentRound = currentRound + 1\n switchCounter = -1\n elif mode == 'inBetweenTimerMode':\n if switchCounter >= self._inBetweenTimer:\n mode = 'sprayMode'\n switchCounter = -1\n if currentRound >= self._rounds and self._rounds is not -1:\n mode = 'sprayOverMode'\n self._deathCycle = (self._spawningCycle + self.\n _internalCounter)\n return returnList\n\n\nclass BulletMaster(object):\n\n def __init__(self, bulletMasterTemplate, masterPositionList, master,\n enemy, spawningCycle):\n self._name = bulletMasterTemplate._name\n self._bulletSpawners = []\n for i in bulletMasterTemplate._bulletSpawnerTemplates:\n self._bulletSpawners.append(BulletSpawner(i, masterPositionList,\n master, enemy, spawningCycle))\n\n def calculateBullets(self):\n returnList = []\n for i in self._bulletSpawners:\n returnList.extend(i.calculateBullets())\n return returnList\n",
"step-5": "#classes that store values related to levels\nfrom mg_cus_struct import *\nfrom mg_movement import *\nimport copy\n\nclass BulletTemplate(object) :\n def __init__(self, animationName, initialVelocity, hitbox) :\n self._spawningCycle = 0\n self._animationName = animationName\n self._initialVelocity = initialVelocity\n self._movementList = dict()\n self._hitbox = hitbox\n\n def addMovementCommand(self, cycle, movementCommand) :\n self._movementList[cycle] = movementCommand\n\nclass BulletSpawnerTemplate(object) :\n def __init__(self, initialPosition, initialVelocity) :\n self._spawningCycle = 0\n self._initialPosition = initialPosition\n self._initialVelocity = initialVelocity\n \n self._movementList = dict()\n \n self._displacement = 0\n self._exitLocations = []\n self._rotationSpeed = 0\n \n self._initialDelay = 0\n self._sprayTimer = []\n self._inBetweenTimer = 0\n self._rounds = -1\n \n self._bulletTemplate = None\n\n #mask\n self._maskName = \"\"\n self._maskLayer = 0\n\n def addSprayTimer(self, sprayTimer) :\n self._sprayTimer.extend(sprayTimer)\n\n def setRounds(self, rounds) :\n self._rounds = rounds\n\n def setInitialDelay(self, initialDelay) :\n self._initialDelay = initialDelay\n\n def setInBetweenTimer(self, delay) :\n self._inBetweenTimer = delay\n\n def addExitLocation(self, location) :\n self._exitLocations.append(location)\n\n def addBulletTemplate(self, bulletTemplate) :\n self._bulletTemplate = bulletTemplate\n\n def addMovementCommand(self, cycle, movementCommand) :\n self._movementList[cycle] = movementCommand\n\n def addMask(self, maskName, maskLayer) :\n self._maskName = maskName\n self._maskLayer = maskLayer\n \nclass BulletMasterTemplate(object) :\n def __init__(self, name) :\n self._name = name\n self._bulletSpawnerTemplates = []\n self._powerUpTable = {\n \"life\" : 0,\n \"power\" : 0,\n \"spell\" : 0,\n \"points\" : 0, \n }\n\n def addBulletSpawnerTemplates(self, bulletSpawnerTemplate) :\n 
self._bulletSpawnerTemplates.append(bulletSpawnerTemplate)\n \nclass Bullet(MovementCommander) :\n def __init__(self, bulletTemplate, position, exitAngle, master, spawningCycle) :\n temp = copy.deepcopy(bulletTemplate._initialVelocity)\n temp._angle = temp._angle + exitAngle\n\n super().__init__(position, temp, spawningCycle)\n self.addStartingParameters(position, temp)\n\n self._animationName = bulletTemplate._animationName\n\n for i in bulletTemplate._movementList :\n self.addMovementCommandDirect(i, bulletTemplate._movementList[i])\n\n self.calculatePositions(master, master._playerPosition, [-100, -100, 1620, 1180], None)\n\nclass BulletSpawner(MovementCommander) :\n def __init__(self, bulletSpawnerTemplate, masterPosition, master, enemy, spawningCycle) :\n self._internalCounter = 0\n self._exitLocations = []\n self._displacement = 0.0\n\n self._master = master\n \n self._displacement = bulletSpawnerTemplate._displacement \n for i in bulletSpawnerTemplate._exitLocations :\n self._exitLocations.append(i)\n self._rotationSpeed = bulletSpawnerTemplate._rotationSpeed\n self._bulletTemplate = bulletSpawnerTemplate._bulletTemplate\n\n self._spawningCycle = enemy._spawningCycle\n self._seenCycle = enemy._spawningCycle\n self._deathCycle = enemy._deathCycle\n \n self._sprayTimer = bulletSpawnerTemplate._sprayTimer\n self._initialDelay = bulletSpawnerTemplate._initialDelay\n \n try :\n self._lengthOfSpray = max(self._sprayTimer)\n except ValueError:\n self._lengthOfSpray = 0\n \n self._inBetweenTimer = bulletSpawnerTemplate._inBetweenTimer\n self._rounds = bulletSpawnerTemplate._rounds\n\n super().__init__(bulletSpawnerTemplate._initialPosition, bulletSpawnerTemplate._initialVelocity, spawningCycle)\n \n self.calculatePositions(master, master._playerPosition, None, masterPosition)\n\n #apply masks\n self._maskName = bulletSpawnerTemplate._maskName\n self._maskLayer = bulletSpawnerTemplate._maskLayer\n\n def calculateBullets(self) :\n returnList = []\n mode = 
\"initialDelayMode\"\n switchCounter = -1\n currentRound = 0\n for i in self._positionList :\n self._internalCounter = self._internalCounter + 1\n switchCounter = switchCounter + 1\n if mode == \"initialDelayMode\" :\n if switchCounter >= self._initialDelay :\n mode = \"sprayMode\"\n switchCounter = -1\n self._seenCycle = self._spawningCycle + self._internalCounter\n elif mode == \"sprayMode\" :\n if switchCounter in self._sprayTimer :\n for j in self._exitLocations :\n offset = CUS_Polar(self._displacement, j)\n pos = CUS_Point(0.0, 0.0)\n pos.add(toPoint(offset))\n pos._x = pos._x + i._x\n pos._y = pos._y + i._y\n bullet = Bullet(self._bulletTemplate, pos, j, self._master, self._spawningCycle+self._internalCounter)\n returnList.append(bullet)\n if switchCounter >= self._lengthOfSpray :\n mode = \"inBetweenTimerMode\"\n currentRound = currentRound + 1\n switchCounter = -1\n elif mode == \"inBetweenTimerMode\" :\n if switchCounter >= self._inBetweenTimer :\n mode = \"sprayMode\"\n switchCounter = -1\n if currentRound >= self._rounds and self._rounds is not -1 :\n mode = \"sprayOverMode\"\n self._deathCycle = self._spawningCycle + self._internalCounter\n\n return returnList\n \nclass BulletMaster(object) :\n def __init__(self, bulletMasterTemplate, masterPositionList, master, enemy, spawningCycle) :\n self._name = bulletMasterTemplate._name\n\n self._bulletSpawners = []\n\n for i in bulletMasterTemplate._bulletSpawnerTemplates :\n self._bulletSpawners.append(BulletSpawner(i, masterPositionList, master, enemy, spawningCycle))\n\n def calculateBullets(self) :\n returnList = []\n for i in self._bulletSpawners :\n returnList.extend(i.calculateBullets())\n\n return returnList\n \n\n \n \n",
"step-ids": [
16,
19,
22,
25,
26
]
}
|
[
16,
19,
22,
25,
26
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.