| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
| --- | --- | --- |
def accumulate(self, buf):
'''add in some more bytes'''
accum = self.crc
for b in buf:
tmp = b ^ (accum & 0xff)
tmp = (tmp ^ (tmp<<4)) & 0xFF
accum = (accum>>8) ^ (tmp<<8) ^ (tmp<<3) ^ (tmp>>4)
self.crc = accum | add in some more bytes | Below is the instruction that describes the task:
### Input:
add in some more bytes
### Response:
def accumulate(self, buf):
'''add in some more bytes'''
accum = self.crc
for b in buf:
tmp = b ^ (accum & 0xff)
tmp = (tmp ^ (tmp<<4)) & 0xFF
accum = (accum>>8) ^ (tmp<<8) ^ (tmp<<3) ^ (tmp>>4)
self.crc = accum |
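A minimal standalone sketch of the accumulator above (the X.25/MCRF4XX CRC-16 variant used by MAVLink). The class name, the 0xFFFF seed, and the check value are assumptions not stated in the snippet itself:

```python
class X25CRC:
    """Self-contained stand-in for the class the method above belongs to."""

    def __init__(self):
        self.crc = 0xFFFF  # conventional seed for this CRC variant (assumed)

    def accumulate(self, buf):
        """add in some more bytes"""
        accum = self.crc
        for b in buf:
            tmp = b ^ (accum & 0xFF)
            tmp = (tmp ^ (tmp << 4)) & 0xFF
            accum = (accum >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)
        self.crc = accum


crc = X25CRC()
crc.accumulate(b"123456789")
print(hex(crc.crc))  # expected 0x6f91, the CRC-16/MCRF4XX check value
```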
def get_instance(self, payload):
"""
Build an instance of SyncMapItemInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
"""
return SyncMapItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
) | Build an instance of SyncMapItemInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance | Below is the instruction that describes the task:
### Input:
Build an instance of SyncMapItemInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of SyncMapItemInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
"""
return SyncMapItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
) |
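Illustrative only: a dependency-free sketch of the list-to-instance factory pattern that `get_instance()` follows, forwarding identifiers stored in the list's `_solution` dict onto each instance. The class names below are hypothetical stand-ins, not Twilio's real classes:

```python
class MapItem:
    def __init__(self, version, payload, service_sid, map_sid):
        self.version = version
        self.payload = payload
        self.service_sid = service_sid
        self.map_sid = map_sid


class MapItemList:
    def __init__(self, version, service_sid, map_sid):
        self._version = version
        # identifiers shared by every instance built from this list
        self._solution = {'service_sid': service_sid, 'map_sid': map_sid}

    def get_instance(self, payload):
        return MapItem(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            map_sid=self._solution['map_sid'],
        )


item = MapItemList('v1', 'ISxxx', 'MPxxx').get_instance({'key': 'alice'})
print(item.service_sid, item.payload['key'])  # ISxxx alice
```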
def _finalize_arguments(self, args):
"""Derive standard args from our weird ones
:type args: Namespace with command line arguments
"""
gps = args.gps
search = args.search
# ensure we have enough data for filter settling
max_plot = max(args.plot)
search = max(search, max_plot * 2 + 8)
args.search = search
self.log(3, "Search window: {0:.0f} sec, max plot window {1:.0f}".
format(search, max_plot))
# make sure we don't create too big interpolations
xpix = 1200.
if args.geometry:
m = re.match('(\\d+)x(\\d+)', args.geometry)
if m:
xpix = float(m.group(1))
# save output x for individual tres calculation
args.nx = xpix
self.args.tres = search / xpix / 2
self.log(3, 'Max time resolution (tres) set to {:.4f}'.format(
self.args.tres))
args.start = [[int(gps - search/2)]]
if args.epoch is None:
args.epoch = args.gps
args.duration = search
args.chan = [[args.chan]]
if args.color_scale is None:
args.color_scale = 'linear'
args.overlap = 0 # so that FFTMixin._finalize_arguments doesn't fail
xmin = args.xmin
xmax = args.xmax
super(Qtransform, self)._finalize_arguments(args)
# unset defaults from `TimeDomainProduct`
args.xmin = xmin
args.xmax = xmax | Derive standard args from our weird ones
:type args: Namespace with command line arguments | Below is the instruction that describes the task:
### Input:
Derive standard args from our weird ones
:type args: Namespace with command line arguments
### Response:
def _finalize_arguments(self, args):
"""Derive standard args from our weird ones
:type args: Namespace with command line arguments
"""
gps = args.gps
search = args.search
# ensure we have enough data for filter settling
max_plot = max(args.plot)
search = max(search, max_plot * 2 + 8)
args.search = search
self.log(3, "Search window: {0:.0f} sec, max plot window {1:.0f}".
format(search, max_plot))
# make sure we don't create too big interpolations
xpix = 1200.
if args.geometry:
m = re.match('(\\d+)x(\\d+)', args.geometry)
if m:
xpix = float(m.group(1))
# save output x for individual tres calculation
args.nx = xpix
self.args.tres = search / xpix / 2
self.log(3, 'Max time resolution (tres) set to {:.4f}'.format(
self.args.tres))
args.start = [[int(gps - search/2)]]
if args.epoch is None:
args.epoch = args.gps
args.duration = search
args.chan = [[args.chan]]
if args.color_scale is None:
args.color_scale = 'linear'
args.overlap = 0 # so that FFTMixin._finalize_arguments doesn't fail
xmin = args.xmin
xmax = args.xmax
super(Qtransform, self)._finalize_arguments(args)
# unset defaults from `TimeDomainProduct`
args.xmin = xmin
args.xmax = xmax |
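A small self-contained sketch of just the window and resolution arithmetic done above, using an `argparse.Namespace` with made-up values in place of the real parsed CLI arguments:

```python
import re
from argparse import Namespace

args = Namespace(search=64, plot=[1, 4, 16], geometry='1600x900')

# pad the search window so the filters have room to settle
search = max(args.search, max(args.plot) * 2 + 8)

# cap the time resolution by the number of horizontal pixels requested
xpix = 1200.
m = re.match(r'(\d+)x(\d+)', args.geometry or '')
if m:
    xpix = float(m.group(1))
tres = search / xpix / 2

print(search, xpix, tres)  # 64 1600.0 0.02
```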
def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
'''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
# fast_primes will output fewer results but will be significantly faster.
# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.
# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
# Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then no longer a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.
# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.
# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no deterministic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.
# Prepare the finite field characteristic (2^p - 1), this also represents the maximum possible value in this field
root_charac = 2 # we're in GF(2)
field_charac = int(root_charac**c_exp - 1)
field_charac_next = int(root_charac**(c_exp+1) - 1)
prim_candidates = []
if fast_primes:
prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
else:
prim_candidates = _range(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)
# Start of the main loop
correct_primes = []
for prim in prim_candidates: # try potential candidates primitive irreducible polys
seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
conflict = False # flag to know if there was at least one conflict
# Second loop, build the whole Galois Field
x = GF2int(1)
for i in _range(field_charac):
# Compute the next value in the field (ie, the next power of alpha/generator)
x = x.multiply(generator, prim, field_charac+1)
# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
if x > field_charac or seen[x] == 1:
conflict = True
break
# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
else:
seen[x] = 1
# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
if not conflict:
correct_primes.append(prim)
if single: return prim
# Return the list of all prime polynomials
return correct_primes | Compute the list of prime polynomials for the given generator and galois field characteristic exponent. | Below is the instruction that describes the task:
### Input:
Compute the list of prime polynomials for the given generator and galois field characteristic exponent.
### Response:
def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
'''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
# fast_primes will output fewer results but will be significantly faster.
# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.
# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
# Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then no longer a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.
# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.
# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no deterministic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.
# Prepare the finite field characteristic (2^p - 1), this also represents the maximum possible value in this field
root_charac = 2 # we're in GF(2)
field_charac = int(root_charac**c_exp - 1)
field_charac_next = int(root_charac**(c_exp+1) - 1)
prim_candidates = []
if fast_primes:
prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
else:
prim_candidates = _range(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)
# Start of the main loop
correct_primes = []
for prim in prim_candidates: # try potential candidates primitive irreducible polys
seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
conflict = False # flag to know if there was at least one conflict
# Second loop, build the whole Galois Field
x = GF2int(1)
for i in _range(field_charac):
# Compute the next value in the field (ie, the next power of alpha/generator)
x = x.multiply(generator, prim, field_charac+1)
# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
if x > field_charac or seen[x] == 1:
conflict = True
break
# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
else:
seen[x] = 1
# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
if not conflict:
correct_primes.append(prim)
if single: return prim
# Return the list of all prime polynomials
return correct_primes |
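The rejection test inside the loop above can be reproduced without the surrounding library (GF2int, rwh_primes1, _range). The sketch below re-implements carry-less modular multiplication and confirms that 0x11d passes while 0x11b (irreducible, but 2 does not generate the whole field with it) fails:

```python
def gf2_mul_mod(a, b, prim, field_size=256):
    """Carry-less multiply a*b over GF(2), reduced modulo the polynomial `prim`."""
    result = 0
    while b:
        if b & 1:
            result ^= a
        b >>= 1
        a <<= 1
        if a & field_size:   # degree hit 8 -> subtract (xor) the modulus
            a ^= prim
    return result


def generates_whole_field(prim, generator=2, c_exp=8):
    field_charac = 2 ** c_exp - 1
    seen, x = set(), 1
    for _ in range(field_charac):
        x = gf2_mul_mod(x, generator, prim, 2 ** c_exp)
        if x > field_charac or x in seen:
            return False     # duplicate or overflow: reject the candidate
        seen.add(x)
    return True


print(generates_whole_field(0x11d))  # True: 285 is the usual GF(2^8) prime polynomial
print(generates_whole_field(0x11b))  # False: 2 cycles early, so values repeat
```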
def get_outputs(self, input_value):
"""
Generate a set of output values for a given input.
"""
output_value = self.convert_to_xmlrpc(input_value)
output = {}
for name in self.output_names:
output[name] = output_value
return output | Generate a set of output values for a given input. | Below is the instruction that describes the task:
### Input:
Generate a set of output values for a given input.
### Response:
def get_outputs(self, input_value):
"""
Generate a set of output values for a given input.
"""
output_value = self.convert_to_xmlrpc(input_value)
output = {}
for name in self.output_names:
output[name] = output_value
return output |
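Illustrative only: the fan-out behaviour of `get_outputs()` (one converted value copied under every declared output name), shown with a hypothetical minimal class since `convert_to_xmlrpc` and `output_names` come from the surrounding framework:

```python
class EchoBlock:
    output_names = ('result', 'copy')

    def convert_to_xmlrpc(self, value):
        return str(value)  # stand-in conversion

    def get_outputs(self, input_value):
        output_value = self.convert_to_xmlrpc(input_value)
        return {name: output_value for name in self.output_names}


print(EchoBlock().get_outputs(42))  # {'result': '42', 'copy': '42'}
```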
def _run_vagrant_command(self, args):
'''
Run a vagrant command and return its stdout.
args: A sequence of arguments to a vagrant command line.
e.g. ['up', 'my_vm_name', '--no-provision'] or
['up', None, '--no-provision'] for a non-Multi-VM environment.
'''
# Make subprocess command
command = self._make_vagrant_command(args)
with self.err_cm() as err_fh:
return compat.decode(subprocess.check_output(command, cwd=self.root,
env=self.env, stderr=err_fh)) | Run a vagrant command and return its stdout.
args: A sequence of arguments to a vagrant command line.
e.g. ['up', 'my_vm_name', '--no-provision'] or
['up', None, '--no-provision'] for a non-Multi-VM environment. | Below is the instruction that describes the task:
### Input:
Run a vagrant command and return its stdout.
args: A sequence of arguments to a vagrant command line.
e.g. ['up', 'my_vm_name', '--no-provision'] or
['up', None, '--no-provision'] for a non-Multi-VM environment.
### Response:
def _run_vagrant_command(self, args):
'''
Run a vagrant command and return its stdout.
args: A sequence of arguments to a vagrant command line.
e.g. ['up', 'my_vm_name', '--no-provision'] or
['up', None, '--no-provision'] for a non-Multi-VM environment.
'''
# Make subprocess command
command = self._make_vagrant_command(args)
with self.err_cm() as err_fh:
return compat.decode(subprocess.check_output(command, cwd=self.root,
env=self.env, stderr=err_fh)) |
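A dependency-free sketch of the same pattern: assemble an argv list, drop the None placeholders used for single-VM calls, then capture and decode stdout. Here `echo` stands in for the real vagrant binary, so this only demonstrates the mechanics:

```python
import subprocess


def run_command(executable, args, cwd=None, env=None):
    # drop None placeholders, mirroring how the vagrant command list is built
    command = [executable] + [a for a in args if a is not None]
    return subprocess.check_output(command, cwd=cwd, env=env).decode('utf-8')


print(run_command('echo', ['up', None, '--no-provision']).strip())
# -> up --no-provision
```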
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage() | Produce a .coverage file from a smother file | Below is the instruction that describes the task:
### Input:
Produce a .coverage file from a smother file
### Response:
def to_coverage(ctx):
"""
Produce a .coverage file from a smother file
"""
sm = Smother.load(ctx.obj['report'])
sm.coverage = coverage.coverage()
sm.write_coverage() |
def not_right(self, num):
"""
WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]
"""
if num == None:
return FlatList([_get_list(self)[:-1:]])
if num <= 0:
return FlatList.EMPTY
return FlatList(_get_list(self)[:-num:]) | WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:] | Below is the the instruction that describes the task:
### Input:
WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]
### Response:
def not_right(self, num):
"""
WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]
"""
if num == None:
return FlatList([_get_list(self)[:-1:]])
if num <= 0:
return FlatList.EMPTY
return FlatList(_get_list(self)[:-num:]) |
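The slicing rule is easier to see on a plain list. This sketch covers only the two main branches (drop the last `num` items; `num <= 0` drops everything) and leaves aside the `num == None` special case above:

```python
def not_right(seq, num):
    """Drop the last `num` items; num <= 0 yields an empty list."""
    if num <= 0:
        return []
    return seq[:-num]


print(not_right([1, 2, 3, 4, 5], 2))  # [1, 2, 3]
print(not_right([1, 2, 3, 4, 5], 0))  # []
```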
def _call(self, name, soapheaders):
"""return the Call to the named remote web service method.
closure used to prevent multiple values for name and soapheaders
parameters
"""
def call_closure(*args, **kwargs):
"""Call the named remote web service method."""
if len(args) and len(kwargs):
raise TypeError, 'Use positional or keyword argument only.'
if len(args) > 0:
raise TypeError, 'Not supporting SOAPENC:Arrays or XSD:List'
if len(kwargs):
args = kwargs
callinfo = getattr(self, name).callinfo
# go through the list of defined methods, and look for the one with
# the same number of arguments as what was passed. this is a weak
# check that should probably be improved in the future to check the
# types of the arguments to allow for polymorphism
for method in self._methods[name]:
if len(method.callinfo.inparams) == len(kwargs):
callinfo = method.callinfo
binding = _Binding(url=self._url or callinfo.location,
soapaction=callinfo.soapAction,
**self._kw)
kw = dict(unique=True)
if callinfo.use == 'encoded':
kw['unique'] = False
if callinfo.style == 'rpc':
request = TC.Struct(None, ofwhat=[],
pname=(callinfo.namespace, name), **kw)
response = TC.Struct(None, ofwhat=[],
pname=(callinfo.namespace, name+"Response"), **kw)
if len(callinfo.getInParameters()) != len(args):
raise RuntimeError('expecting "%s" parts, got %s' %(
str(callinfo.getInParameters()), str(args)))
for msg,pms in ((request,callinfo.getInParameters()),
(response,callinfo.getOutParameters())):
msg.ofwhat = []
for part in pms:
klass = GTD(*part.type)
if klass is None:
if part.type:
klass = filter(lambda gt: part.type==gt.type,TC.TYPES)
if len(klass) == 0:
klass = filter(lambda gt: part.type[1]==gt.type[1],TC.TYPES)
if not len(klass):klass = [TC.Any]
if len(klass) > 1: #Enumerations, XMLString, etc
klass = filter(lambda i: i.__dict__.has_key('type'), klass)
klass = klass[0]
else:
klass = TC.Any
msg.ofwhat.append(klass(part.name))
msg.ofwhat = tuple(msg.ofwhat)
if not args: args = {}
else:
# Grab <part element> attribute
ipart,opart = callinfo.getInParameters(),callinfo.getOutParameters()
if ( len(ipart) != 1 or not ipart[0].element_type or
ipart[0].type is None ):
raise RuntimeError, 'Bad Input Message "%s"' %callinfo.name
if ( len(opart) not in (0,1) or not opart[0].element_type or
opart[0].type is None ):
raise RuntimeError, 'Bad Output Message "%s"' %callinfo.name
# if ( len(args) > 1 ):
# raise RuntimeError, 'Message has only one part: %s' %str(args)
ipart = ipart[0]
request,response = GED(*ipart.type),None
if opart: response = GED(*opart[0].type)
msg = args
if self._asdict:
if not msg: msg = dict()
self._nullpyclass(request)
elif request.pyclass is not None:
if type(args) is dict:
msg = request.pyclass()
msg.__dict__.update(args)
elif type(args) is list and len(args) == 1:
msg = request.pyclass(args[0])
else:
msg = request.pyclass()
binding.Send(None, None, msg,
requesttypecode=request,
soapheaders=soapheaders,
encodingStyle=callinfo.encodingStyle)
if response is None:
return None
if self._asdict: self._nullpyclass(response)
return binding.Receive(replytype=response,
encodingStyle=callinfo.encodingStyle)
return call_closure | return the Call to the named remote web service method.
closure used to prevent multiple values for name and soapheaders
parameters | Below is the instruction that describes the task:
### Input:
return the Call to the named remote web service method.
closure used to prevent multiple values for name and soapheaders
parameters
### Response:
def _call(self, name, soapheaders):
"""return the Call to the named remote web service method.
closure used to prevent multiple values for name and soapheaders
parameters
"""
def call_closure(*args, **kwargs):
"""Call the named remote web service method."""
if len(args) and len(kwargs):
raise TypeError, 'Use positional or keyword argument only.'
if len(args) > 0:
raise TypeError, 'Not supporting SOAPENC:Arrays or XSD:List'
if len(kwargs):
args = kwargs
callinfo = getattr(self, name).callinfo
# go through the list of defined methods, and look for the one with
# the same number of arguments as what was passed. this is a weak
# check that should probably be improved in the future to check the
# types of the arguments to allow for polymorphism
for method in self._methods[name]:
if len(method.callinfo.inparams) == len(kwargs):
callinfo = method.callinfo
binding = _Binding(url=self._url or callinfo.location,
soapaction=callinfo.soapAction,
**self._kw)
kw = dict(unique=True)
if callinfo.use == 'encoded':
kw['unique'] = False
if callinfo.style == 'rpc':
request = TC.Struct(None, ofwhat=[],
pname=(callinfo.namespace, name), **kw)
response = TC.Struct(None, ofwhat=[],
pname=(callinfo.namespace, name+"Response"), **kw)
if len(callinfo.getInParameters()) != len(args):
raise RuntimeError('expecting "%s" parts, got %s' %(
str(callinfo.getInParameters()), str(args)))
for msg,pms in ((request,callinfo.getInParameters()),
(response,callinfo.getOutParameters())):
msg.ofwhat = []
for part in pms:
klass = GTD(*part.type)
if klass is None:
if part.type:
klass = filter(lambda gt: part.type==gt.type,TC.TYPES)
if len(klass) == 0:
klass = filter(lambda gt: part.type[1]==gt.type[1],TC.TYPES)
if not len(klass):klass = [TC.Any]
if len(klass) > 1: #Enumerations, XMLString, etc
klass = filter(lambda i: i.__dict__.has_key('type'), klass)
klass = klass[0]
else:
klass = TC.Any
msg.ofwhat.append(klass(part.name))
msg.ofwhat = tuple(msg.ofwhat)
if not args: args = {}
else:
# Grab <part element> attribute
ipart,opart = callinfo.getInParameters(),callinfo.getOutParameters()
if ( len(ipart) != 1 or not ipart[0].element_type or
ipart[0].type is None ):
raise RuntimeError, 'Bad Input Message "%s"' %callinfo.name
if ( len(opart) not in (0,1) or not opart[0].element_type or
opart[0].type is None ):
raise RuntimeError, 'Bad Output Message "%s"' %callinfo.name
# if ( len(args) > 1 ):
# raise RuntimeError, 'Message has only one part: %s' %str(args)
ipart = ipart[0]
request,response = GED(*ipart.type),None
if opart: response = GED(*opart[0].type)
msg = args
if self._asdict:
if not msg: msg = dict()
self._nullpyclass(request)
elif request.pyclass is not None:
if type(args) is dict:
msg = request.pyclass()
msg.__dict__.update(args)
elif type(args) is list and len(args) == 1:
msg = request.pyclass(args[0])
else:
msg = request.pyclass()
binding.Send(None, None, msg,
requesttypecode=request,
soapheaders=soapheaders,
encodingStyle=callinfo.encodingStyle)
if response is None:
return None
if self._asdict: self._nullpyclass(response)
return binding.Receive(replytype=response,
encodingStyle=callinfo.encodingStyle)
return call_closure |
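The method above is Python 2 (ZSI); the essential idea is simply that it returns a closure with `name` and `soapheaders` bound once per remote method. Below is a stripped-down Python 3 sketch of that closure pattern, with an invented `transport` callable instead of ZSI's real binding:

```python
def make_caller(name, transport):
    def call_closure(*args, **kwargs):
        """Call the named remote method with positional or keyword args."""
        if args and kwargs:
            raise TypeError('Use positional or keyword arguments, not both.')
        payload = kwargs or list(args)
        return transport(name, payload)
    return call_closure


send = make_caller('GetQuote', lambda name, payload: (name, payload))
print(send(symbol='IBM'))  # ('GetQuote', {'symbol': 'IBM'})
```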
def Arrow(startPoint, endPoint, s=None, c="r", alpha=1, res=12):
"""
Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
expressed as the fraction of the window size.
.. note:: If ``s=None`` the arrow is scaled proportionally to its length,
otherwise it represents the fraction of the window size.
|OrientedArrow|
"""
axis = np.array(endPoint) - np.array(startPoint)
length = np.linalg.norm(axis)
if length:
axis = axis / length
theta = np.arccos(axis[2])
phi = np.arctan2(axis[1], axis[0])
arr = vtk.vtkArrowSource()
arr.SetShaftResolution(res)
arr.SetTipResolution(res)
if s:
sz = 0.02
arr.SetTipRadius(sz)
arr.SetShaftRadius(sz / 1.75)
arr.SetTipLength(sz * 15)
arr.Update()
t = vtk.vtkTransform()
t.RotateZ(phi * 57.3)
t.RotateY(theta * 57.3)
t.RotateY(-90) # put it along Z
if s:
sz = 800.0 * s
t.Scale(length, sz, sz)
else:
t.Scale(length, length, length)
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(arr.GetOutput())
tf.SetTransform(t)
tf.Update()
actor = Actor(tf.GetOutput(), c, alpha)
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(startPoint)
actor.DragableOff()
actor.PickableOff()
actor.base = np.array(startPoint)
actor.top = np.array(endPoint)
settings.collectable_actors.append(actor)
return actor | Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
expressed as the fraction of the window size.
.. note:: If ``s=None`` the arrow is scaled proportionally to its length,
otherwise it represents the fraction of the window size.
|OrientedArrow| | Below is the instruction that describes the task:
### Input:
Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
expressed as the fraction of the window size.
.. note:: If ``s=None`` the arrow is scaled proportionally to its length,
otherwise it represents the fraction of the window size.
|OrientedArrow|
### Response:
def Arrow(startPoint, endPoint, s=None, c="r", alpha=1, res=12):
"""
Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
expressed as the fraction of the window size.
.. note:: If ``s=None`` the arrow is scaled proportionally to its length,
otherwise it represents the fraction of the window size.
|OrientedArrow|
"""
axis = np.array(endPoint) - np.array(startPoint)
length = np.linalg.norm(axis)
if length:
axis = axis / length
theta = np.arccos(axis[2])
phi = np.arctan2(axis[1], axis[0])
arr = vtk.vtkArrowSource()
arr.SetShaftResolution(res)
arr.SetTipResolution(res)
if s:
sz = 0.02
arr.SetTipRadius(sz)
arr.SetShaftRadius(sz / 1.75)
arr.SetTipLength(sz * 15)
arr.Update()
t = vtk.vtkTransform()
t.RotateZ(phi * 57.3)
t.RotateY(theta * 57.3)
t.RotateY(-90) # put it along Z
if s:
sz = 800.0 * s
t.Scale(length, sz, sz)
else:
t.Scale(length, length, length)
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(arr.GetOutput())
tf.SetTransform(t)
tf.Update()
actor = Actor(tf.GetOutput(), c, alpha)
actor.GetProperty().SetInterpolationToPhong()
actor.SetPosition(startPoint)
actor.DragableOff()
actor.PickableOff()
actor.base = np.array(startPoint)
actor.top = np.array(endPoint)
settings.collectable_actors.append(actor)
return actor |
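The orientation math above can be checked without VTK: the arrow source points along +x, and the code converts the start-to-end axis into a polar angle theta (from +z) and an azimuth phi (57.3 in the code is just degrees per radian). A numpy-only verification with made-up endpoints:

```python
import numpy as np

startPoint, endPoint = np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 1.0])
axis = endPoint - startPoint
length = np.linalg.norm(axis)
axis = axis / length
theta = np.arccos(axis[2])          # tilt away from the +z axis
phi = np.arctan2(axis[1], axis[0])  # azimuth in the x-y plane

print(round(length, 3), round(np.degrees(theta), 1), round(np.degrees(phi), 1))
# 1.732 54.7 45.0
```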
def send_content(
self,
http_code,
content,
mime_type="text/html",
http_message=None,
content_length=-1,
):
# type: (int, str, str, str, int) -> None
"""
Utility method to send the given content as an answer.
You can still use get_wfile or write afterwards, if you forced the
content length.
If content_length is negative (default), it will be computed as the
length of the content;
if it is positive, the given value will be used;
if it is None, the content-length header won't be sent.
:param http_code: HTTP result code
:param content: Data to be sent (must be a string)
:param mime_type: Content MIME type (content-type)
:param http_message: HTTP code description
:param content_length: Forced content length
"""
self.set_response(http_code, http_message)
if mime_type and not self.is_header_set("content-type"):
self.set_header("content-type", mime_type)
# Convert the content
raw_content = to_bytes(content)
if content_length is not None and not self.is_header_set(
"content-length"
):
if content_length < 0:
# Compute the length
content_length = len(raw_content)
# Send the length
self.set_header("content-length", content_length)
self.end_headers()
# Send the content
self.write(raw_content) | Utility method to send the given content as an answer.
You can still use get_wfile or write afterwards, if you forced the
content length.
If content_length is negative (default), it will be computed as the
length of the content;
if it is positive, the given value will be used;
if it is None, the content-length header won't be sent.
:param http_code: HTTP result code
:param content: Data to be sent (must be a string)
:param mime_type: Content MIME type (content-type)
:param http_message: HTTP code description
:param content_length: Forced content length | Below is the instruction that describes the task:
### Input:
Utility method to send the given content as an answer.
You can still use get_wfile or write afterwards, if you forced the
content length.
If content_length is negative (default), it will be computed as the
length of the content;
if it is positive, the given value will be used;
if it is None, the content-length header won't be sent.
:param http_code: HTTP result code
:param content: Data to be sent (must be a string)
:param mime_type: Content MIME type (content-type)
:param http_message: HTTP code description
:param content_length: Forced content length
### Response:
def send_content(
self,
http_code,
content,
mime_type="text/html",
http_message=None,
content_length=-1,
):
# type: (int, str, str, str, int) -> None
"""
Utility method to send the given content as an answer.
You can still use get_wfile or write afterwards, if you forced the
content length.
If content_length is negative (default), it will be computed as the
length of the content;
if it is positive, the given value will be used;
if it is None, the content-length header won't be sent.
:param http_code: HTTP result code
:param content: Data to be sent (must be a string)
:param mime_type: Content MIME type (content-type)
:param http_message: HTTP code description
:param content_length: Forced content length
"""
self.set_response(http_code, http_message)
if mime_type and not self.is_header_set("content-type"):
self.set_header("content-type", mime_type)
# Convert the content
raw_content = to_bytes(content)
if content_length is not None and not self.is_header_set(
"content-length"
):
if content_length < 0:
# Compute the length
content_length = len(raw_content)
# Send the length
self.set_header("content-length", content_length)
self.end_headers()
# Send the content
self.write(raw_content) |
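A standalone sketch of just the content-length rule described in the docstring (negative means compute it, positive means force it, None means omit the header); `to_bytes` is replaced by a plain UTF-8 encode here:

```python
def content_length_header(content, content_length=-1):
    raw = content.encode('utf-8')
    if content_length is None:
        return raw, None                      # header omitted
    if content_length < 0:
        content_length = len(raw)             # computed from the body
    return raw, ('content-length', str(content_length))


print(content_length_header('héllo'))        # 6 bytes once encoded
print(content_length_header('héllo', None))  # (..., None)
print(content_length_header('héllo', 99))    # (..., ('content-length', '99'))
```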
def QA_util_get_real_date(date, trade_list=trade_date_sse, towards=-1):
"""
Get the real trading date; the third parameter `towards` sets the direction to roll the date
towards=1 : iterate the date forward (later)
towards=-1 : iterate the date backward (earlier)
@ yutiansut
"""
date = str(date)[0:10]
if towards == 1:
while date not in trade_list:
date = str(
datetime.datetime.strptime(str(date)[0:10],
'%Y-%m-%d') +
datetime.timedelta(days=1)
)[0:10]
else:
return str(date)[0:10]
elif towards == -1:
while date not in trade_list:
date = str(
datetime.datetime.strptime(str(date)[0:10],
'%Y-%m-%d') -
datetime.timedelta(days=1)
)[0:10]
else:
return str(date)[0:10] | Get the real trading date; the third parameter `towards` sets the direction to roll the date
towards=1 : iterate the date forward (later)
towards=-1 : iterate the date backward (earlier)
@ yutiansut | Below is the instruction that describes the task:
### Input:
Get the real trading date; the third parameter `towards` sets the direction to roll the date
towards=1 : iterate the date forward (later)
towards=-1 : iterate the date backward (earlier)
@ yutiansut
### Response:
def QA_util_get_real_date(date, trade_list=trade_date_sse, towards=-1):
"""
Get the real trading date; the third parameter `towards` sets the direction to roll the date
towards=1 : iterate the date forward (later)
towards=-1 : iterate the date backward (earlier)
@ yutiansut
"""
date = str(date)[0:10]
if towards == 1:
while date not in trade_list:
date = str(
datetime.datetime.strptime(str(date)[0:10],
'%Y-%m-%d') +
datetime.timedelta(days=1)
)[0:10]
else:
return str(date)[0:10]
elif towards == -1:
while date not in trade_list:
date = str(
datetime.datetime.strptime(str(date)[0:10],
'%Y-%m-%d') -
datetime.timedelta(days=1)
)[0:10]
else:
return str(date)[0:10] |
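A self-contained illustration of the date-rolling logic with a toy trading calendar (the real function defaults to the Shanghai Stock Exchange calendar, `trade_date_sse`):

```python
import datetime

trade_list = ['2023-01-03', '2023-01-04', '2023-01-05', '2023-01-06', '2023-01-09']


def get_real_date(date, trade_list, towards=-1):
    date = str(date)[0:10]
    step = datetime.timedelta(days=1 if towards == 1 else -1)
    while date not in trade_list:
        date = str(datetime.datetime.strptime(date, '%Y-%m-%d') + step)[0:10]
    return date


print(get_real_date('2023-01-07', trade_list, towards=-1))  # 2023-01-06
print(get_real_date('2023-01-07', trade_list, towards=1))   # 2023-01-09
```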
def geom_check_axis(g, atwts, ax,
nmax=_DEF.SYMM_MATCH_NMAX,
tol=_DEF.SYMM_MATCH_TOL):
""" [Get max proper order and reflection for an axis]
.. todo:: Complete geom_parse_axis docstring
"""
# Imports
import numpy as np
# Store the max found rotation order of the geometry.
order = geom_find_rotsymm(g, atwts, ax, \
False, nmax, tol)[0]
# Store the presence/absence of a reflection plane.
refl = geom_symm_match(g, atwts, ax, 0, True) < tol
# Return the pair of values for outside handling
return order, refl | [Get max proper order and reflection for an axis]
.. todo:: Complete geom_parse_axis docstring | Below is the instruction that describes the task:
### Input:
[Get max proper order and reflection for an axis]
.. todo:: Complete geom_parse_axis docstring
### Response:
def geom_check_axis(g, atwts, ax,
nmax=_DEF.SYMM_MATCH_NMAX,
tol=_DEF.SYMM_MATCH_TOL):
""" [Get max proper order and reflection for an axis]
.. todo:: Complete geom_parse_axis docstring
"""
# Imports
import numpy as np
# Store the max found rotation order of the geometry.
order = geom_find_rotsymm(g, atwts, ax, \
False, nmax, tol)[0]
# Store the presence/absence of a reflection plane.
refl = geom_symm_match(g, atwts, ax, 0, True) < tol
# Return the pair of values for outside handling
return order, refl |
def _get_fwl_port_speed(self, server_id, is_virt=True):
"""Determines the appropriate speed for a firewall.
:param int server_id: The ID of server the firewall is for
:param bool is_virt: True if the server_id is for a virtual server
:returns: an integer representing the Mbps speed of a firewall
"""
fwl_port_speed = 0
if is_virt:
mask = ('primaryNetworkComponent[maxSpeed]')
svc = self.client['Virtual_Guest']
primary = svc.getObject(mask=mask, id=server_id)
fwl_port_speed = primary['primaryNetworkComponent']['maxSpeed']
else:
mask = ('id,maxSpeed,networkComponentGroup.networkComponents')
svc = self.client['Hardware_Server']
network_components = svc.getFrontendNetworkComponents(
mask=mask, id=server_id)
grouped = [interface['networkComponentGroup']['networkComponents']
for interface in network_components
if 'networkComponentGroup' in interface]
ungrouped = [interface
for interface in network_components
if 'networkComponentGroup' not in interface]
# For each group, sum the maxSpeeds of each component in the
# group. Put the sum for each in a new list
group_speeds = []
for group in grouped:
group_speed = 0
for interface in group:
group_speed += interface['maxSpeed']
group_speeds.append(group_speed)
# The max speed of all groups is the max of the list
max_grouped_speed = max(group_speeds)
max_ungrouped = 0
for interface in ungrouped:
max_ungrouped = max(max_ungrouped, interface['maxSpeed'])
fwl_port_speed = max(max_grouped_speed, max_ungrouped)
return fwl_port_speed | Determines the appropriate speed for a firewall.
:param int server_id: The ID of server the firewall is for
:param bool is_virt: True if the server_id is for a virtual server
:returns: an integer representing the Mbps speed of a firewall | Below is the instruction that describes the task:
### Input:
Determines the appropriate speed for a firewall.
:param int server_id: The ID of server the firewall is for
:param bool is_virt: True if the server_id is for a virtual server
:returns: a integer representing the Mbps speed of a firewall
### Response:
def _get_fwl_port_speed(self, server_id, is_virt=True):
"""Determines the appropriate speed for a firewall.
:param int server_id: The ID of server the firewall is for
:param bool is_virt: True if the server_id is for a virtual server
:returns: an integer representing the Mbps speed of a firewall
"""
fwl_port_speed = 0
if is_virt:
mask = ('primaryNetworkComponent[maxSpeed]')
svc = self.client['Virtual_Guest']
primary = svc.getObject(mask=mask, id=server_id)
fwl_port_speed = primary['primaryNetworkComponent']['maxSpeed']
else:
mask = ('id,maxSpeed,networkComponentGroup.networkComponents')
svc = self.client['Hardware_Server']
network_components = svc.getFrontendNetworkComponents(
mask=mask, id=server_id)
grouped = [interface['networkComponentGroup']['networkComponents']
for interface in network_components
if 'networkComponentGroup' in interface]
ungrouped = [interface
for interface in network_components
if 'networkComponentGroup' not in interface]
# For each group, sum the maxSpeeds of each component in the
# group. Put the sum for each in a new list
group_speeds = []
for group in grouped:
group_speed = 0
for interface in group:
group_speed += interface['maxSpeed']
group_speeds.append(group_speed)
# The max speed of all groups is the max of the list
max_grouped_speed = max(group_speeds)
max_ungrouped = 0
for interface in ungrouped:
max_ungrouped = max(max_ungrouped, interface['maxSpeed'])
fwl_port_speed = max(max_grouped_speed, max_ungrouped)
return fwl_port_speed |
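The hardware-server branch above is plain aggregation over dicts, so it can be exercised with made-up data: grouped components are summed per group, and the firewall speed is the max of the best group total and the best ungrouped interface:

```python
network_components = [
    {'maxSpeed': 1000,
     'networkComponentGroup': {'networkComponents': [{'maxSpeed': 1000},
                                                     {'maxSpeed': 1000}]}},
    {'maxSpeed': 100},  # ungrouped interface
]

grouped = [c['networkComponentGroup']['networkComponents']
           for c in network_components if 'networkComponentGroup' in c]
ungrouped = [c for c in network_components if 'networkComponentGroup' not in c]

group_speeds = [sum(i['maxSpeed'] for i in group) for group in grouped]
max_grouped = max(group_speeds) if group_speeds else 0
max_ungrouped = max((i['maxSpeed'] for i in ungrouped), default=0)

print(max(max_grouped, max_ungrouped))  # 2000
```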
def parse_domains(self, domain, params):
"""
Parse a single Route53Domains domain
"""
domain_id = self.get_non_aws_id(domain['DomainName'])
domain['name'] = domain.pop('DomainName')
#TODO: Get Dnssec info when available
#api_client = params['api_client']
#details = api_client.get_domain_detail(DomainName = domain['name'])
#get_keys(details, domain, ['Dnssec'])
self.domains[domain_id] = domain | Parse a single Route53Domains domain | Below is the instruction that describes the task:
### Input:
Parse a single Route53Domains domain
### Response:
def parse_domains(self, domain, params):
"""
Parse a single Route53Domains domain
"""
domain_id = self.get_non_aws_id(domain['DomainName'])
domain['name'] = domain.pop('DomainName')
#TODO: Get Dnssec info when available
#api_client = params['api_client']
#details = api_client.get_domain_detail(DomainName = domain['name'])
#get_keys(details, domain, ['Dnssec'])
self.domains[domain_id] = domain |
def _get_field_type(self, key, value):
"""
Helper to create field object based on value type
"""
if isinstance(value, bool):
return BooleanField(name=key)
elif isinstance(value, int):
return IntegerField(name=key)
elif isinstance(value, float):
return FloatField(name=key)
elif isinstance(value, str):
return StringField(name=key)
elif isinstance(value, time):
return TimeField(name=key)
elif isinstance(value, datetime):
return DateTimeField(name=key)
elif isinstance(value, date):
return DateField(name=key)
elif isinstance(value, timedelta):
return TimedeltaField(name=key)
elif isinstance(value, Enum):
return EnumField(name=key, enum_class=type(value))
elif isinstance(value, (dict, BaseDynamicModel, Mapping)):
return ModelField(name=key, model_class=self.__dynamic_model__ or self.__class__)
elif isinstance(value, BaseModel):
return ModelField(name=key, model_class=value.__class__)
elif isinstance(value, (list, set, ListModel)):
if not len(value):
return None
field_type = self._get_field_type(None, value[0])
return ArrayField(name=key, field_type=field_type)
elif value is None:
return None
else:
raise TypeError("Invalid parameter: %s. Type not supported." % (key,)) | Helper to create field object based on value type | Below is the instruction that describes the task:
### Input:
Helper to create field object based on value type
### Response:
def _get_field_type(self, key, value):
"""
Helper to create field object based on value type
"""
if isinstance(value, bool):
return BooleanField(name=key)
elif isinstance(value, int):
return IntegerField(name=key)
elif isinstance(value, float):
return FloatField(name=key)
elif isinstance(value, str):
return StringField(name=key)
elif isinstance(value, time):
return TimeField(name=key)
elif isinstance(value, datetime):
return DateTimeField(name=key)
elif isinstance(value, date):
return DateField(name=key)
elif isinstance(value, timedelta):
return TimedeltaField(name=key)
elif isinstance(value, Enum):
return EnumField(name=key, enum_class=type(value))
elif isinstance(value, (dict, BaseDynamicModel, Mapping)):
return ModelField(name=key, model_class=self.__dynamic_model__ or self.__class__)
elif isinstance(value, BaseModel):
return ModelField(name=key, model_class=value.__class__)
elif isinstance(value, (list, set, ListModel)):
if not len(value):
return None
field_type = self._get_field_type(None, value[0])
return ArrayField(name=key, field_type=field_type)
elif value is None:
return None
else:
raise TypeError("Invalid parameter: %s. Type not supported." % (key,)) |
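A dependency-free sketch of the same dispatch idea, mapping a sample value to a field label instead of the framework's Field objects; note that the check order matters (bool before int, datetime before date), just as in the method above:

```python
from datetime import date, datetime, time, timedelta
from enum import Enum


def guess_field(value):
    for typ, label in [(bool, 'BooleanField'), (int, 'IntegerField'),
                       (float, 'FloatField'), (str, 'StringField'),
                       (time, 'TimeField'), (datetime, 'DateTimeField'),
                       (date, 'DateField'), (timedelta, 'TimedeltaField'),
                       (Enum, 'EnumField'), (dict, 'ModelField')]:
        if isinstance(value, typ):
            return label
    if isinstance(value, (list, set)):
        return 'ArrayField' if value else None
    if value is None:
        return None
    raise TypeError('Invalid parameter: type not supported.')


print(guess_field(True), guess_field(3), guess_field(datetime.now()))
# BooleanField IntegerField DateTimeField
```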
def get_members(self, **query_params):
'''
Get all members attached to this organisation. Returns a list of
Member objects
Returns:
list(Member): The members attached to this organisation
'''
members = self.get_members_json(self.base_uri,
query_params=query_params)
members_list = []
for member_json in members:
members_list.append(self.create_member(member_json))
return members_list | Get all members attached to this organisation. Returns a list of
Member objects
Returns:
list(Member): The members attached to this organisation | Below is the instruction that describes the task:
### Input:
Get all members attached to this organisation. Returns a list of
Member objects
Returns:
list(Member): The members attached to this organisation
### Response:
def get_members(self, **query_params):
'''
Get all members attached to this organisation. Returns a list of
Member objects
Returns:
list(Member): The members attached to this organisation
'''
members = self.get_members_json(self.base_uri,
query_params=query_params)
members_list = []
for member_json in members:
members_list.append(self.create_member(member_json))
return members_list |
def nlp(self, inputString, sourceTime=None, version=None):
"""Utilizes parse() after making judgements about what datetime
information belongs together.
It makes logical groupings based on proximity and returns a parsed
datetime for each matched grouping of datetime text, along with
location info within the given inputString.
@type inputString: string
@param inputString: natural language text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple or None
@return: tuple of tuples in the format (parsed_datetime as
datetime.datetime, flags as int, start_pos as int,
end_pos as int, matched_text as string) or None if there
were no matches
"""
orig_inputstring = inputString
# replace periods at the end of sentences w/ spaces
# opposed to removing them altogether in order to
# retain relative positions (identified by alpha, period, space).
# this is required for some of the regex patterns to match
inputString = re.sub(r'(\w)(\.)(\s)', r'\1 \3', inputString).lower()
inputString = re.sub(r'(\w)(\'|")(\s|$)', r'\1 \3', inputString)
inputString = re.sub(r'(\s|^)(\'|")(\w)', r'\1 \3', inputString)
startpos = 0 # the start position in the inputString during the loop
# list of lists in format:
# [startpos, endpos, matchedstring, flags, type]
matches = []
while startpos < len(inputString):
# empty match
leftmost_match = [0, 0, None, 0, None]
# Modifier like next\prev..
m = self.ptc.CRE_MODIFIER.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 0
leftmost_match[4] = 'modifier'
# Quantity + Units
m = self.ptc.CRE_UNITS.search(inputString[startpos:])
if m is not None:
debug and log.debug('CRE_UNITS matched')
if self._UnitsTrapped(inputString[startpos:], m, 'units'):
debug and log.debug('day suffix trapped by unit match')
else:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('qty') + startpos:
leftmost_match[0] = m.start('qty') + startpos
leftmost_match[1] = m.end('qty') + startpos
leftmost_match[2] = m.group('qty')
leftmost_match[3] = 3
leftmost_match[4] = 'units'
if m.start('qty') > 0 and \
inputString[m.start('qty') - 1] == '-':
leftmost_match[0] = leftmost_match[0] - 1
leftmost_match[2] = '-' + leftmost_match[2]
# Quantity + Units
m = self.ptc.CRE_QUNITS.search(inputString[startpos:])
if m is not None:
debug and log.debug('CRE_QUNITS matched')
if self._UnitsTrapped(inputString[startpos:], m, 'qunits'):
debug and log.debug('day suffix trapped by qunit match')
else:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('qty') + startpos:
leftmost_match[0] = m.start('qty') + startpos
leftmost_match[1] = m.end('qty') + startpos
leftmost_match[2] = m.group('qty')
leftmost_match[3] = 3
leftmost_match[4] = 'qunits'
if m.start('qty') > 0 and \
inputString[m.start('qty') - 1] == '-':
leftmost_match[0] = leftmost_match[0] - 1
leftmost_match[2] = '-' + leftmost_match[2]
m = self.ptc.CRE_DATE3.search(inputString[startpos:])
# NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
# for match in self.ptc.CRE_DATE3.finditer(inputString[startpos:]):
# to prevent "HH:MM(:SS) time strings" expressions from
# triggering this regex, we checks if the month field exists
# in the searched expression, if it doesn't exist, the date
# field is not valid
# if match.group('mthname'):
# m = self.ptc.CRE_DATE3.search(inputString[startpos:],
# match.start())
# break
# String date format
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('date') + startpos:
leftmost_match[0] = m.start('date') + startpos
leftmost_match[1] = m.end('date') + startpos
leftmost_match[2] = m.group('date')
leftmost_match[3] = 1
leftmost_match[4] = 'dateStr'
# Standard date format
m = self.ptc.CRE_DATE.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('date') + startpos:
leftmost_match[0] = m.start('date') + startpos
leftmost_match[1] = m.end('date') + startpos
leftmost_match[2] = m.group('date')
leftmost_match[3] = 1
leftmost_match[4] = 'dateStd'
# Natural language day strings
m = self.ptc.CRE_DAY.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 1
leftmost_match[4] = 'dayStr'
# Weekday
m = self.ptc.CRE_WEEKDAY.search(inputString[startpos:])
if m is not None:
if inputString[startpos:] not in self.ptc.dayOffsets:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 1
leftmost_match[4] = 'weekdy'
# Natural language time strings
m = self.ptc.CRE_TIME.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 2
leftmost_match[4] = 'timeStr'
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('hours') + startpos:
leftmost_match[0] = m.start('hours') + startpos
leftmost_match[1] = m.end('meridian') + startpos
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
leftmost_match[3] = 2
leftmost_match[4] = 'meridian'
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('hours') + startpos:
leftmost_match[0] = m.start('hours') + startpos
if m.group('seconds') is not None:
leftmost_match[1] = m.end('seconds') + startpos
else:
leftmost_match[1] = m.end('minutes') + startpos
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
leftmost_match[3] = 2
leftmost_match[4] = 'timeStd'
# Units only; must be preceded by a modifier
if len(matches) > 0 and matches[-1][3] == 0:
m = self.ptc.CRE_UNITS_ONLY.search(inputString[startpos:])
# Ensure that any match is immediately preceded by the
# modifier. "Next is the word 'month'" should not parse as a
# date while "next month" should
if m is not None and \
inputString[startpos:startpos +
m.start()].strip() == '':
debug and log.debug('CRE_UNITS_ONLY matched [%s]',
m.group())
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 3
leftmost_match[4] = 'unitsOnly'
# set the start position to the end pos of the leftmost match
startpos = leftmost_match[1]
# nothing was detected
# so break out of the loop
if startpos == 0:
startpos = len(inputString)
else:
if leftmost_match[3] > 0:
m = self.ptc.CRE_NLP_PREFIX.search(
inputString[:leftmost_match[0]] +
' ' + str(leftmost_match[3]))
if m is not None:
leftmost_match[0] = m.start('nlp_prefix')
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
matches.append(leftmost_match)
# find matches in proximity with one another and
# return all the parsed values
proximity_matches = []
if len(matches) > 1:
combined = ''
from_match_index = 0
date = matches[0][3] == 1
time = matches[0][3] == 2
units = matches[0][3] == 3
for i in range(1, len(matches)):
# test proximity (are there characters between matches?)
endofprevious = matches[i - 1][1]
begofcurrent = matches[i][0]
if orig_inputstring[endofprevious:
begofcurrent].lower().strip() != '':
# this one isn't in proximity, but maybe
# we have enough to make a datetime
# TODO: make sure the combination of
# formats (modifier, dateStd, etc) makes logical sense
# before parsing together
if date or time or units:
combined = orig_inputstring[matches[from_match_index]
[0]:matches[i - 1][1]]
parsed_datetime, flags = self.parse(combined,
sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[from_match_index][0],
matches[i - 1][1],
combined))
# not in proximity, reset starting from current
from_match_index = i
date = matches[i][3] == 1
time = matches[i][3] == 2
units = matches[i][3] == 3
continue
else:
if matches[i][3] == 1:
date = True
if matches[i][3] == 2:
time = True
if matches[i][3] == 3:
units = True
# check last
# we have enough to make a datetime
if date or time or units:
combined = orig_inputstring[matches[from_match_index][0]:
matches[len(matches) - 1][1]]
parsed_datetime, flags = self.parse(combined, sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[from_match_index][0],
matches[len(matches) - 1][1],
combined))
elif len(matches) == 0:
return None
else:
if matches[0][3] == 0: # not enough info to parse
return None
else:
combined = orig_inputstring[matches[0][0]:matches[0][1]]
parsed_datetime, flags = self.parse(matches[0][2], sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[0][0],
matches[0][1],
combined))
return tuple(proximity_matches) | Utilizes parse() after making judgements about what datetime
information belongs together.
It makes logical groupings based on proximity and returns a parsed
datetime for each matched grouping of datetime text, along with
location info within the given inputString.
@type inputString: string
@param inputString: natural language text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple or None
@return: tuple of tuples in the format (parsed_datetime as
datetime.datetime, flags as int, start_pos as int,
end_pos as int, matched_text as string) or None if there
were no matches | Below is the instruction that describes the task:
### Input:
Utilizes parse() after making judgements about what datetime
information belongs together.
It makes logical groupings based on proximity and returns a parsed
datetime for each matched grouping of datetime text, along with
location info within the given inputString.
@type inputString: string
@param inputString: natural language text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple or None
@return: tuple of tuples in the format (parsed_datetime as
datetime.datetime, flags as int, start_pos as int,
end_pos as int, matched_text as string) or None if there
were no matches
### Response:
def nlp(self, inputString, sourceTime=None, version=None):
"""Utilizes parse() after making judgements about what datetime
information belongs together.
It makes logical groupings based on proximity and returns a parsed
datetime for each matched grouping of datetime text, along with
location info within the given inputString.
@type inputString: string
@param inputString: natural language text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@type version: integer
@param version: style version, default will use L{Calendar}
parameter version value
@rtype: tuple or None
@return: tuple of tuples in the format (parsed_datetime as
datetime.datetime, flags as int, start_pos as int,
end_pos as int, matched_text as string) or None if there
were no matches
"""
orig_inputstring = inputString
# replace periods at the end of sentences w/ spaces
# opposed to removing them altogether in order to
# retain relative positions (identified by alpha, period, space).
# this is required for some of the regex patterns to match
inputString = re.sub(r'(\w)(\.)(\s)', r'\1 \3', inputString).lower()
inputString = re.sub(r'(\w)(\'|")(\s|$)', r'\1 \3', inputString)
inputString = re.sub(r'(\s|^)(\'|")(\w)', r'\1 \3', inputString)
startpos = 0 # the start position in the inputString during the loop
# list of lists in format:
# [startpos, endpos, matchedstring, flags, type]
matches = []
while startpos < len(inputString):
# empty match
leftmost_match = [0, 0, None, 0, None]
# Modifier like next\prev..
m = self.ptc.CRE_MODIFIER.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 0
leftmost_match[4] = 'modifier'
# Quantity + Units
m = self.ptc.CRE_UNITS.search(inputString[startpos:])
if m is not None:
debug and log.debug('CRE_UNITS matched')
if self._UnitsTrapped(inputString[startpos:], m, 'units'):
debug and log.debug('day suffix trapped by unit match')
else:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('qty') + startpos:
leftmost_match[0] = m.start('qty') + startpos
leftmost_match[1] = m.end('qty') + startpos
leftmost_match[2] = m.group('qty')
leftmost_match[3] = 3
leftmost_match[4] = 'units'
if m.start('qty') > 0 and \
inputString[m.start('qty') - 1] == '-':
leftmost_match[0] = leftmost_match[0] - 1
leftmost_match[2] = '-' + leftmost_match[2]
# Quantity + Units
m = self.ptc.CRE_QUNITS.search(inputString[startpos:])
if m is not None:
debug and log.debug('CRE_QUNITS matched')
if self._UnitsTrapped(inputString[startpos:], m, 'qunits'):
debug and log.debug('day suffix trapped by qunit match')
else:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('qty') + startpos:
leftmost_match[0] = m.start('qty') + startpos
leftmost_match[1] = m.end('qty') + startpos
leftmost_match[2] = m.group('qty')
leftmost_match[3] = 3
leftmost_match[4] = 'qunits'
if m.start('qty') > 0 and \
inputString[m.start('qty') - 1] == '-':
leftmost_match[0] = leftmost_match[0] - 1
leftmost_match[2] = '-' + leftmost_match[2]
m = self.ptc.CRE_DATE3.search(inputString[startpos:])
# NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
# for match in self.ptc.CRE_DATE3.finditer(inputString[startpos:]):
# to prevent "HH:MM(:SS) time strings" expressions from
# triggering this regex, we checks if the month field exists
# in the searched expression, if it doesn't exist, the date
# field is not valid
# if match.group('mthname'):
# m = self.ptc.CRE_DATE3.search(inputString[startpos:],
# match.start())
# break
# String date format
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('date') + startpos:
leftmost_match[0] = m.start('date') + startpos
leftmost_match[1] = m.end('date') + startpos
leftmost_match[2] = m.group('date')
leftmost_match[3] = 1
leftmost_match[4] = 'dateStr'
# Standard date format
m = self.ptc.CRE_DATE.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('date') + startpos:
leftmost_match[0] = m.start('date') + startpos
leftmost_match[1] = m.end('date') + startpos
leftmost_match[2] = m.group('date')
leftmost_match[3] = 1
leftmost_match[4] = 'dateStd'
# Natural language day strings
m = self.ptc.CRE_DAY.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 1
leftmost_match[4] = 'dayStr'
# Weekday
m = self.ptc.CRE_WEEKDAY.search(inputString[startpos:])
if m is not None:
if inputString[startpos:] not in self.ptc.dayOffsets:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 1
leftmost_match[4] = 'weekdy'
# Natural language time strings
m = self.ptc.CRE_TIME.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 2
leftmost_match[4] = 'timeStr'
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('hours') + startpos:
leftmost_match[0] = m.start('hours') + startpos
leftmost_match[1] = m.end('meridian') + startpos
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
leftmost_match[3] = 2
leftmost_match[4] = 'meridian'
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(inputString[startpos:])
if m is not None:
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start('hours') + startpos:
leftmost_match[0] = m.start('hours') + startpos
if m.group('seconds') is not None:
leftmost_match[1] = m.end('seconds') + startpos
else:
leftmost_match[1] = m.end('minutes') + startpos
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
leftmost_match[3] = 2
leftmost_match[4] = 'timeStd'
# Units only; must be preceded by a modifier
if len(matches) > 0 and matches[-1][3] == 0:
m = self.ptc.CRE_UNITS_ONLY.search(inputString[startpos:])
# Ensure that any match is immediately preceded by the
# modifier. "Next is the word 'month'" should not parse as a
# date while "next month" should
if m is not None and \
inputString[startpos:startpos +
m.start()].strip() == '':
debug and log.debug('CRE_UNITS_ONLY matched [%s]',
m.group())
if leftmost_match[1] == 0 or \
leftmost_match[0] > m.start() + startpos:
leftmost_match[0] = m.start() + startpos
leftmost_match[1] = m.end() + startpos
leftmost_match[2] = m.group()
leftmost_match[3] = 3
leftmost_match[4] = 'unitsOnly'
# set the start position to the end pos of the leftmost match
startpos = leftmost_match[1]
# nothing was detected
# so break out of the loop
if startpos == 0:
startpos = len(inputString)
else:
if leftmost_match[3] > 0:
m = self.ptc.CRE_NLP_PREFIX.search(
inputString[:leftmost_match[0]] +
' ' + str(leftmost_match[3]))
if m is not None:
leftmost_match[0] = m.start('nlp_prefix')
leftmost_match[2] = inputString[leftmost_match[0]:
leftmost_match[1]]
matches.append(leftmost_match)
# find matches in proximity with one another and
# return all the parsed values
proximity_matches = []
if len(matches) > 1:
combined = ''
from_match_index = 0
date = matches[0][3] == 1
time = matches[0][3] == 2
units = matches[0][3] == 3
for i in range(1, len(matches)):
# test proximity (are there characters between matches?)
endofprevious = matches[i - 1][1]
begofcurrent = matches[i][0]
if orig_inputstring[endofprevious:
begofcurrent].lower().strip() != '':
# this one isn't in proximity, but maybe
# we have enough to make a datetime
# TODO: make sure the combination of
# formats (modifier, dateStd, etc) makes logical sense
# before parsing together
if date or time or units:
combined = orig_inputstring[matches[from_match_index]
[0]:matches[i - 1][1]]
parsed_datetime, flags = self.parse(combined,
sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[from_match_index][0],
matches[i - 1][1],
combined))
# not in proximity, reset starting from current
from_match_index = i
date = matches[i][3] == 1
time = matches[i][3] == 2
units = matches[i][3] == 3
continue
else:
if matches[i][3] == 1:
date = True
if matches[i][3] == 2:
time = True
if matches[i][3] == 3:
units = True
# check last
# we have enough to make a datetime
if date or time or units:
combined = orig_inputstring[matches[from_match_index][0]:
matches[len(matches) - 1][1]]
parsed_datetime, flags = self.parse(combined, sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[from_match_index][0],
matches[len(matches) - 1][1],
combined))
elif len(matches) == 0:
return None
else:
if matches[0][3] == 0: # not enough info to parse
return None
else:
combined = orig_inputstring[matches[0][0]:matches[0][1]]
parsed_datetime, flags = self.parse(matches[0][2], sourceTime,
version)
proximity_matches.append((
datetime.datetime(*parsed_datetime[:6]),
flags,
matches[0][0],
matches[0][1],
combined))
return tuple(proximity_matches) |
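A minimal usage sketch for the nlp() method above; the parsedatetime import path and the flag meanings noted in the comment are assumptions drawn from that library's documented conventions, not from this row:
import parsedatetime

cal = parsedatetime.Calendar()
results = cal.nlp("Lunch next Friday at 1pm, then a call in 2 hours")
if results is not None:
    for parsed_dt, flags, start_pos, end_pos, matched_text in results:
        # flags follow parse()'s convention: 1 = date, 2 = time, 3 = date and time
        print(parsed_dt, flags, (start_pos, end_pos), repr(matched_text))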
def example_delta_alter_configs(a, args):
"""
The AlterConfigs Kafka API requires all configuration to be passed,
any left out configuration properties will revert to their default settings.
This example shows how to just modify the supplied configuration entries
by first reading the configuration from the broker, updating the supplied
configuration with the broker configuration (without overwriting), and
then writing it all back.
The async nature of futures is also show-cased, which makes this example
a bit more complex than it needs to be in the synchronous case.
"""
# Convert supplied config to resources.
# We can reuse the same resources both for describe_configs and
# alter_configs.
resources = []
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
resource = ConfigResource(restype, resname)
resources.append(resource)
for k, v in [conf.split('=') for conf in configs.split(',')]:
resource.set_config(k, v)
# Set up a locked counter and an Event (for signaling) to track when the
# second level of futures are done. This is a bit of a contrived example
# due to no other asynchronous mechanism being used, so we'll need
# to wait on something to signal completion.
class WaitZero(object):
def __init__(self, waitcnt):
self.cnt = waitcnt
self.lock = threading.Lock()
self.event = threading.Event()
def decr(self):
""" Decrement cnt by 1"""
with self.lock:
assert self.cnt > 0
self.cnt -= 1
self.event.set()
def wait(self):
""" Wait until cnt reaches 0 """
self.lock.acquire()
while self.cnt > 0:
self.lock.release()
self.event.wait()
self.event.clear()
self.lock.acquire()
self.lock.release()
def __len__(self):
with self.lock:
return self.cnt
wait_zero = WaitZero(len(resources))
# Read existing configuration from cluster
fs = a.describe_configs(resources)
def delta_alter_configs_done(fut, resource):
e = fut.exception()
if e is not None:
print("Config update for {} failed: {}".format(resource, e))
else:
print("Config for {} updated".format(resource))
wait_zero.decr()
def delta_alter_configs(resource, remote_config):
print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
len(resource), resource, len(remote_config)))
# Only set configuration that is not default
for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
resource.set_config(k, entry.value, overwrite=False)
fs = a.alter_configs([resource])
fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))
# For each resource's future set up a completion callback
# that in turn calls alter_configs() on that single resource.
# This is inefficient since the resources can usually go in
# one single alter_configs() call, but we're also show-casing
# the futures here.
for res, f in fs.items():
f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))
# Wait for done callbacks to be triggered and operations to complete.
print("Waiting for {} resource updates to finish".format(len(wait_zero)))
wait_zero.wait() | The AlterConfigs Kafka API requires all configuration to be passed,
any left out configuration properties will revert to their default settings.
This example shows how to just modify the supplied configuration entries
by first reading the configuration from the broker, updating the supplied
configuration with the broker configuration (without overwriting), and
then writing it all back.
The async nature of futures is also show-cased, which makes this example
a bit more complex than it needs to be in the synchronous case. | Below is the instruction that describes the task:
### Input:
The AlterConfigs Kafka API requires all configuration to be passed,
any left out configuration properties will revert to their default settings.
This example shows how to just modify the supplied configuration entries
by first reading the configuration from the broker, updating the supplied
configuration with the broker configuration (without overwriting), and
then writing it all back.
The async nature of futures is also show-cased, which makes this example
a bit more complex than it needs to be in the synchronous case.
### Response:
def example_delta_alter_configs(a, args):
"""
The AlterConfigs Kafka API requires all configuration to be passed,
any left out configuration properties will revert to their default settings.
This example shows how to just modify the supplied configuration entries
by first reading the configuration from the broker, updating the supplied
configuration with the broker configuration (without overwriting), and
then writing it all back.
The async nature of futures is also show-cased, which makes this example
a bit more complex than it needs to be in the synchronous case.
"""
# Convert supplied config to resources.
# We can reuse the same resources both for describe_configs and
# alter_configs.
resources = []
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
resource = ConfigResource(restype, resname)
resources.append(resource)
for k, v in [conf.split('=') for conf in configs.split(',')]:
resource.set_config(k, v)
# Set up a locked counter and an Event (for signaling) to track when the
# second level of futures are done. This is a bit of a contrived example
# due to no other asynchronous mechanism being used, so we'll need
# to wait on something to signal completion.
class WaitZero(object):
def __init__(self, waitcnt):
self.cnt = waitcnt
self.lock = threading.Lock()
self.event = threading.Event()
def decr(self):
""" Decrement cnt by 1"""
with self.lock:
assert self.cnt > 0
self.cnt -= 1
self.event.set()
def wait(self):
""" Wait until cnt reaches 0 """
self.lock.acquire()
while self.cnt > 0:
self.lock.release()
self.event.wait()
self.event.clear()
self.lock.acquire()
self.lock.release()
def __len__(self):
with self.lock:
return self.cnt
wait_zero = WaitZero(len(resources))
# Read existing configuration from cluster
fs = a.describe_configs(resources)
def delta_alter_configs_done(fut, resource):
e = fut.exception()
if e is not None:
print("Config update for {} failed: {}".format(resource, e))
else:
print("Config for {} updated".format(resource))
wait_zero.decr()
def delta_alter_configs(resource, remote_config):
print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
len(resource), resource, len(remote_config)))
# Only set configuration that is not default
for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
resource.set_config(k, entry.value, overwrite=False)
fs = a.alter_configs([resource])
fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))
# For each resource's future set up a completion callback
# that in turn calls alter_configs() on that single resource.
# This is inefficient since the resources can usually go in
# one single alter_configs() call, but we're also show-casing
# the futures here.
for res, f in fs.items():
f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))
# Wait for done callbacks to be triggered and operations to complete.
print("Waiting for {} resource updates to finish".format(len(wait_zero)))
wait_zero.wait() |
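A short driver sketch for example_delta_alter_configs() above; the broker address, topic name and config values are placeholders, and only AdminClient from confluent_kafka.admin is assumed beyond what the row shows:
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker
# args are consumed as (resource_type, resource_name, "key=value,key=value") triples
example_delta_alter_configs(
    admin, ["topic", "example-topic", "retention.ms=86400000,cleanup.policy=delete"])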
def _set_edgeport(self, v, load=False):
"""
Setter method for edgeport, mapped from YANG variable /interface/port_channel/spanning_tree/edgeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edgeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edgeport() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=edgeport.edgeport, is_container='container', presence=False, yang_name="edgeport", rest_name="edgeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Edgeport', u'display-when': u'((/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/mstp) or (/protocol/spanning-tree/rpvst)) '}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """edgeport must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=edgeport.edgeport, is_container='container', presence=False, yang_name="edgeport", rest_name="edgeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Edgeport', u'display-when': u'((/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/mstp) or (/protocol/spanning-tree/rpvst)) '}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__edgeport = t
if hasattr(self, '_set'):
self._set() | Setter method for edgeport, mapped from YANG variable /interface/port_channel/spanning_tree/edgeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edgeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edgeport() directly. | Below is the instruction that describes the task:
### Input:
Setter method for edgeport, mapped from YANG variable /interface/port_channel/spanning_tree/edgeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edgeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edgeport() directly.
### Response:
def _set_edgeport(self, v, load=False):
"""
Setter method for edgeport, mapped from YANG variable /interface/port_channel/spanning_tree/edgeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edgeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edgeport() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=edgeport.edgeport, is_container='container', presence=False, yang_name="edgeport", rest_name="edgeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Edgeport', u'display-when': u'((/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/mstp) or (/protocol/spanning-tree/rpvst)) '}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """edgeport must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=edgeport.edgeport, is_container='container', presence=False, yang_name="edgeport", rest_name="edgeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Edgeport', u'display-when': u'((/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/mstp) or (/protocol/spanning-tree/rpvst)) '}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__edgeport = t
if hasattr(self, '_set'):
self._set() |
def get_time(self) -> float:
"""
Get the current position in the music in seconds
"""
if self.paused:
return self.pause_time
return mixer.music.get_pos() / 1000.0 | Get the current position in the music in seconds | Below is the instruction that describes the task:
### Input:
Get the current position in the music in seconds
### Response:
def get_time(self) -> float:
"""
Get the current position in the music in seconds
"""
if self.paused:
return self.pause_time
return mixer.music.get_pos() / 1000.0 |
def update(self, *, name=None, show_headers=None, show_totals=None, style=None):
"""
Updates this table
:param str name: the name of the table
:param bool show_headers: whether or not to show the headers
:param bool show_totals: whether or not to show the totals
:param str style: the style of the table
:return: Success or Failure
"""
if name is None and show_headers is None and show_totals is None and style is None:
raise ValueError('Provide at least one parameter to update')
data = {}
if name:
data['name'] = name
if show_headers:
data['showHeaders'] = show_headers
if show_totals:
data['showTotals'] = show_totals
if style:
data['style'] = style
response = self.session.patch(self.build_url(''), data=data)
if not response:
return False
data = response.json()
self.name = data.get('name', self.name)
self.show_headers = data.get('showHeaders', self.show_headers)
self.show_totals = data.get('showTotals', self.show_totals)
self.style = data.get('style', self.style)
return True | Updates this table
:param str name: the name of the table
:param bool show_headers: whether or not to show the headers
:param bool show_totals: whether or not to show the totals
:param str style: the style of the table
:return: Success or Failure | Below is the instruction that describes the task:
### Input:
Updates this table
:param str name: the name of the table
:param bool show_headers: whether or not to show the headers
:param bool show_totals: whether or not to show the totals
:param str style: the style of the table
:return: Success or Failure
### Response:
def update(self, *, name=None, show_headers=None, show_totals=None, style=None):
"""
Updates this table
:param str name: the name of the table
:param bool show_headers: whether or not to show the headers
:param bool show_totals: whether or not to show the totals
:param str style: the style of the table
:return: Success or Failure
"""
if name is None and show_headers is None and show_totals is None and style is None:
raise ValueError('Provide at least one parameter to update')
data = {}
if name:
data['name'] = name
if show_headers:
data['showHeaders'] = show_headers
if show_totals:
data['showTotals'] = show_totals
if style:
data['style'] = style
response = self.session.patch(self.build_url(''), data=data)
if not response:
return False
data = response.json()
self.name = data.get('name', self.name)
self.show_headers = data.get('showHeaders', self.show_headers)
self.show_totals = data.get('showTotals', self.show_totals)
self.style = data.get('style', self.style)
return True |
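A usage sketch for the update() method above; how the Table object is obtained is not shown in this row, so `table` is assumed to be an already-built instance bound to a valid workbook session, and the name/style values are placeholders:
ok = table.update(name="SalesQ1", show_totals=True, style="TableStyleMedium2")
if not ok:
    raise RuntimeError("table update was rejected by the API")
print(table.name, table.show_headers, table.show_totals, table.style)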
def neighbor_add(self, address, remote_as,
remote_port=DEFAULT_BGP_PORT,
enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
enable_evpn=DEFAULT_CAP_MBGP_EVPN,
enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
next_hop=None, password=None, multi_exit_disc=None,
site_of_origins=None,
is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
local_address=None,
local_port=None, local_as=None,
connect_mode=DEFAULT_CONNECT_MODE):
""" This method registers a new neighbor. The BGP speaker tries to
establish a bgp session with the peer (accepts a connection
from the peer and also tries to connect to it).
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address. Only IPv4 is
supported now.
``remote_as`` specifies the AS number of the peer. It must be
an integer between 1 and 65535.
``remote_port`` specifies the TCP port number of the peer.
``enable_ipv4`` enables IPv4 address family for this
neighbor.
``enable_ipv6`` enables IPv6 address family for this
neighbor.
``enable_vpnv4`` enables VPNv4 address family for this
neighbor.
``enable_vpnv6`` enables VPNv6 address family for this
neighbor.
``enable_evpn`` enables Ethernet VPN address family for this
neighbor.
``enable_ipv4fs`` enables IPv4 Flow Specification address family
for this neighbor.
``enable_ipv6fs`` enables IPv6 Flow Specification address family
for this neighbor.
``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
for this neighbor.
``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
for this neighbor.
``enable_l2vpnfs`` enables L2VPN Flow Specification address family
for this neighbor.
``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
neighbor.
``enable_four_octet_as_number`` enables Four-Octet AS Number
capability for this neighbor.
``next_hop`` specifies the next hop IP address. If not
specified, host's ip address to access to a peer is used.
``password`` is used for the MD5 authentication if it's
specified. By default, the MD5 authentication is disabled.
``multi_exit_disc`` specifies multi exit discriminator (MED) value
as an int type value.
If omitted, MED is not sent to the neighbor.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``is_route_server_client`` specifies whether this neighbor is a
route server's client or not.
``is_route_reflector_client`` specifies whether this neighbor is a
route reflector's client or not.
``is_next_hop_self`` specifies whether the BGP speaker announces
its own ip address to iBGP neighbor or not as path's next_hop address.
``local_address`` specifies Loopback interface address for
iBGP peering.
``local_port`` specifies source TCP port for iBGP peering.
``local_as`` specifies local AS number per-peer.
If omitted, the AS number of BGPSpeaker instance is used.
``connect_mode`` specifies how to connect to this neighbor.
This parameter must be one of the following.
- CONNECT_MODE_ACTIVE = 'active'
- CONNECT_MODE_PASSIVE = 'passive'
- CONNECT_MODE_BOTH (default) = 'both'
"""
bgp_neighbor = {
neighbors.IP_ADDRESS: address,
neighbors.REMOTE_AS: remote_as,
REMOTE_PORT: remote_port,
PEER_NEXT_HOP: next_hop,
PASSWORD: password,
IS_ROUTE_SERVER_CLIENT: is_route_server_client,
IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
IS_NEXT_HOP_SELF: is_next_hop_self,
CONNECT_MODE: connect_mode,
CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
CAP_MBGP_IPV4: enable_ipv4,
CAP_MBGP_IPV6: enable_ipv6,
CAP_MBGP_VPNV4: enable_vpnv4,
CAP_MBGP_VPNV6: enable_vpnv6,
CAP_MBGP_EVPN: enable_evpn,
CAP_MBGP_IPV4FS: enable_ipv4fs,
CAP_MBGP_IPV6FS: enable_ipv6fs,
CAP_MBGP_VPNV4FS: enable_vpnv4fs,
CAP_MBGP_VPNV6FS: enable_vpnv6fs,
CAP_MBGP_L2VPNFS: enable_l2vpnfs,
}
if multi_exit_disc:
bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
if site_of_origins:
bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
if local_address:
bgp_neighbor[LOCAL_ADDRESS] = local_address
if local_port:
bgp_neighbor[LOCAL_PORT] = local_port
if local_as:
bgp_neighbor[LOCAL_AS] = local_as
call('neighbor.create', **bgp_neighbor) | This method registers a new neighbor. The BGP speaker tries to
establish a bgp session with the peer (accepts a connection
from the peer and also tries to connect to it).
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address. Only IPv4 is
supported now.
``remote_as`` specifies the AS number of the peer. It must be
an integer between 1 and 65535.
``remote_port`` specifies the TCP port number of the peer.
``enable_ipv4`` enables IPv4 address family for this
neighbor.
``enable_ipv6`` enables IPv6 address family for this
neighbor.
``enable_vpnv4`` enables VPNv4 address family for this
neighbor.
``enable_vpnv6`` enables VPNv6 address family for this
neighbor.
``enable_evpn`` enables Ethernet VPN address family for this
neighbor.
``enable_ipv4fs`` enables IPv4 Flow Specification address family
for this neighbor.
``enable_ipv6fs`` enables IPv6 Flow Specification address family
for this neighbor.
``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
for this neighbor.
``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
for this neighbor.
``enable_l2vpnfs`` enables L2VPN Flow Specification address family
for this neighbor.
``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
neighbor.
``enable_four_octet_as_number`` enables Four-Octet AS Number
capability for this neighbor.
``next_hop`` specifies the next hop IP address. If not
specified, host's ip address to access to a peer is used.
``password`` is used for the MD5 authentication if it's
specified. By default, the MD5 authentication is disabled.
``multi_exit_disc`` specifies multi exit discriminator (MED) value
as an int type value.
If omitted, MED is not sent to the neighbor.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``is_route_server_client`` specifies whether this neighbor is a
route server's client or not.
``is_route_reflector_client`` specifies whether this neighbor is a
route reflector's client or not.
``is_next_hop_self`` specifies whether the BGP speaker announces
its own ip address to iBGP neighbor or not as path's next_hop address.
``local_address`` specifies Loopback interface address for
iBGP peering.
``local_port`` specifies source TCP port for iBGP peering.
``local_as`` specifies local AS number per-peer.
If omitted, the AS number of BGPSpeaker instance is used.
``connect_mode`` specifies how to connect to this neighbor.
This parameter must be one of the following.
- CONNECT_MODE_ACTIVE = 'active'
- CONNECT_MODE_PASSIVE = 'passive'
- CONNECT_MODE_BOTH (default) = 'both' | Below is the instruction that describes the task:
### Input:
This method registers a new neighbor. The BGP speaker tries to
establish a bgp session with the peer (accepts a connection
from the peer and also tries to connect to it).
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address. Only IPv4 is
supported now.
``remote_as`` specifies the AS number of the peer. It must be
an integer between 1 and 65535.
``remote_port`` specifies the TCP port number of the peer.
``enable_ipv4`` enables IPv4 address family for this
neighbor.
``enable_ipv6`` enables IPv6 address family for this
neighbor.
``enable_vpnv4`` enables VPNv4 address family for this
neighbor.
``enable_vpnv6`` enables VPNv6 address family for this
neighbor.
``enable_evpn`` enables Ethernet VPN address family for this
neighbor.
``enable_ipv4fs`` enables IPv4 Flow Specification address family
for this neighbor.
``enable_ipv6fs`` enables IPv6 Flow Specification address family
for this neighbor.
``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
for this neighbor.
``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
for this neighbor.
``enable_l2vpnfs`` enables L2VPN Flow Specification address family
for this neighbor.
``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
neighbor.
``enable_four_octet_as_number`` enables Four-Octet AS Number
capability for this neighbor.
``next_hop`` specifies the next hop IP address. If not
specified, host's ip address to access to a peer is used.
``password`` is used for the MD5 authentication if it's
specified. By default, the MD5 authentication is disabled.
``multi_exit_disc`` specifies multi exit discriminator (MED) value
as an int type value.
If omitted, MED is not sent to the neighbor.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``is_route_server_client`` specifies whether this neighbor is a
route server's client or not.
``is_route_reflector_client`` specifies whether this neighbor is a
route reflector's client or not.
``is_next_hop_self`` specifies whether the BGP speaker announces
its own ip address to iBGP neighbor or not as path's next_hop address.
``local_address`` specifies Loopback interface address for
iBGP peering.
``local_port`` specifies source TCP port for iBGP peering.
``local_as`` specifies local AS number per-peer.
If omitted, the AS number of BGPSpeaker instance is used.
``connect_mode`` specifies how to connect to this neighbor.
This parameter must be one of the following.
- CONNECT_MODE_ACTIVE = 'active'
- CONNECT_MODE_PASSIVE = 'passive'
- CONNECT_MODE_BOTH (default) = 'both'
### Response:
def neighbor_add(self, address, remote_as,
remote_port=DEFAULT_BGP_PORT,
enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
enable_evpn=DEFAULT_CAP_MBGP_EVPN,
enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
next_hop=None, password=None, multi_exit_disc=None,
site_of_origins=None,
is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
local_address=None,
local_port=None, local_as=None,
connect_mode=DEFAULT_CONNECT_MODE):
""" This method registers a new neighbor. The BGP speaker tries to
establish a bgp session with the peer (accepts a connection
from the peer and also tries to connect to it).
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address. Only IPv4 is
supported now.
``remote_as`` specifies the AS number of the peer. It must be
an integer between 1 and 65535.
``remote_port`` specifies the TCP port number of the peer.
``enable_ipv4`` enables IPv4 address family for this
neighbor.
``enable_ipv6`` enables IPv6 address family for this
neighbor.
``enable_vpnv4`` enables VPNv4 address family for this
neighbor.
``enable_vpnv6`` enables VPNv6 address family for this
neighbor.
``enable_evpn`` enables Ethernet VPN address family for this
neighbor.
``enable_ipv4fs`` enables IPv4 Flow Specification address family
for this neighbor.
``enable_ipv6fs`` enables IPv6 Flow Specification address family
for this neighbor.
``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
for this neighbor.
``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
for this neighbor.
``enable_l2vpnfs`` enables L2VPN Flow Specification address family
for this neighbor.
``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
neighbor.
``enable_four_octet_as_number`` enables Four-Octet AS Number
capability for this neighbor.
``next_hop`` specifies the next hop IP address. If not
specified, host's ip address to access to a peer is used.
``password`` is used for the MD5 authentication if it's
specified. By default, the MD5 authentication is disabled.
``multi_exit_disc`` specifies multi exit discriminator (MED) value
as an int type value.
If omitted, MED is not sent to the neighbor.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``is_route_server_client`` specifies whether this neighbor is a
route server's client or not.
``is_route_reflector_client`` specifies whether this neighbor is a
route reflector's client or not.
``is_next_hop_self`` specifies whether the BGP speaker announces
its own ip address to iBGP neighbor or not as path's next_hop address.
``local_address`` specifies Loopback interface address for
iBGP peering.
``local_port`` specifies source TCP port for iBGP peering.
``local_as`` specifies local AS number per-peer.
If omitted, the AS number of BGPSpeaker instance is used.
``connect_mode`` specifies how to connect to this neighbor.
This parameter must be one of the following.
- CONNECT_MODE_ACTIVE = 'active'
- CONNECT_MODE_PASSIVE = 'passive'
- CONNECT_MODE_BOTH (default) = 'both'
"""
bgp_neighbor = {
neighbors.IP_ADDRESS: address,
neighbors.REMOTE_AS: remote_as,
REMOTE_PORT: remote_port,
PEER_NEXT_HOP: next_hop,
PASSWORD: password,
IS_ROUTE_SERVER_CLIENT: is_route_server_client,
IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
IS_NEXT_HOP_SELF: is_next_hop_self,
CONNECT_MODE: connect_mode,
CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
CAP_MBGP_IPV4: enable_ipv4,
CAP_MBGP_IPV6: enable_ipv6,
CAP_MBGP_VPNV4: enable_vpnv4,
CAP_MBGP_VPNV6: enable_vpnv6,
CAP_MBGP_EVPN: enable_evpn,
CAP_MBGP_IPV4FS: enable_ipv4fs,
CAP_MBGP_IPV6FS: enable_ipv6fs,
CAP_MBGP_VPNV4FS: enable_vpnv4fs,
CAP_MBGP_VPNV6FS: enable_vpnv6fs,
CAP_MBGP_L2VPNFS: enable_l2vpnfs,
}
if multi_exit_disc:
bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
if site_of_origins:
bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
if local_address:
bgp_neighbor[LOCAL_ADDRESS] = local_address
if local_port:
bgp_neighbor[LOCAL_PORT] = local_port
if local_as:
bgp_neighbor[LOCAL_AS] = local_as
call('neighbor.create', **bgp_neighbor) |
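A minimal peering sketch for neighbor_add() above; the BGPSpeaker constructor and import path are assumptions based on Ryu's documented API, and the addresses and AS numbers are placeholders:
from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker

speaker = BGPSpeaker(as_number=64512, router_id="10.0.0.1")
speaker.neighbor_add(
    "192.0.2.1",          # peer IP address (placeholder)
    remote_as=64513,
    enable_ipv4=True,
    enable_vpnv4=True,
    connect_mode="both",  # CONNECT_MODE_BOTH
)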
def get_ids(a):
"""
make copy of sequences with short identifier
"""
a_id = '%s.id.fa' % (a.rsplit('.', 1)[0])
a_id_lookup = '%s.id.lookup' % (a.rsplit('.', 1)[0])
if check(a_id) is True:
return a_id, a_id_lookup
a_id_f = open(a_id, 'w')
a_id_lookup_f = open(a_id_lookup, 'w')
ids = []
for seq in parse_fasta(open(a)):
id = id_generator()
while id in ids:
id = id_generator()
ids.append(id)
header = seq[0].split('>')[1]
name = remove_bad(header)
seq[0] = '>%s %s' % (id, header)
print('\n'.join(seq), file=a_id_f)
print('%s\t%s\t%s' % (id, name, header), file=a_id_lookup_f)
return a_id, a_id_lookup | make copy of sequences with short identifier | Below is the instruction that describes the task:
### Input:
make copy of sequences with short identifier
### Response:
def get_ids(a):
"""
make copy of sequences with short identifier
"""
a_id = '%s.id.fa' % (a.rsplit('.', 1)[0])
a_id_lookup = '%s.id.lookup' % (a.rsplit('.', 1)[0])
if check(a_id) is True:
return a_id, a_id_lookup
a_id_f = open(a_id, 'w')
a_id_lookup_f = open(a_id_lookup, 'w')
ids = []
for seq in parse_fasta(open(a)):
id = id_generator()
while id in ids:
id = id_generator()
ids.append(id)
header = seq[0].split('>')[1]
name = remove_bad(header)
seq[0] = '>%s %s' % (id, header)
print('\n'.join(seq), file=a_id_f)
print('%s\t%s\t%s' % (id, name, header), file=a_id_lookup_f)
return a_id, a_id_lookup |
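A small sketch of calling get_ids() above; the FASTA path is a placeholder, and parse_fasta, id_generator, remove_bad and check are assumed to be helpers from the same module:
fasta_with_ids, lookup_path = get_ids("genomes.fa")  # placeholder input file
# genomes.id.fa     -> sequences re-headed with short random IDs
# genomes.id.lookup -> id <TAB> cleaned name <TAB> original header
with open(lookup_path) as handle:
    for line in handle:
        short_id, name, header = line.rstrip("\n").split("\t", 2)
        print(short_id, "->", header)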
def SetRange(self, range_offset, range_size):
"""Sets the data range (offset and size).
The data range is used to map a range of data within one file
(e.g. a single partition within a full disk image) as a file-like object.
Args:
range_offset (int): start offset of the data range.
range_size (int): size of the data range.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the range offset or range size is invalid.
"""
if self._is_open:
raise IOError('Already open.')
if range_offset < 0:
raise ValueError(
'Invalid range offset: {0:d} value out of bounds.'.format(
range_offset))
if range_size < 0:
raise ValueError(
'Invalid range size: {0:d} value out of bounds.'.format(
range_size))
self._range_offset = range_offset
self._range_size = range_size
self._current_offset = 0 | Sets the data range (offset and size).
The data range is used to map a range of data within one file
(e.g. a single partition within a full disk image) as a file-like object.
Args:
range_offset (int): start offset of the data range.
range_size (int): size of the data range.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the range offset or range size is invalid. | Below is the instruction that describes the task:
### Input:
Sets the data range (offset and size).
The data range is used to map a range of data within one file
(e.g. a single partition within a full disk image) as a file-like object.
Args:
range_offset (int): start offset of the data range.
range_size (int): size of the data range.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the range offset or range size is invalid.
### Response:
def SetRange(self, range_offset, range_size):
"""Sets the data range (offset and size).
The data range is used to map a range of data within one file
(e.g. a single partition within a full disk image) as a file-like object.
Args:
range_offset (int): start offset of the data range.
range_size (int): size of the data range.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the range offset or range size is invalid.
"""
if self._is_open:
raise IOError('Already open.')
if range_offset < 0:
raise ValueError(
'Invalid range offset: {0:d} value out of bounds.'.format(
range_offset))
if range_size < 0:
raise ValueError(
'Invalid range size: {0:d} value out of bounds.'.format(
range_size))
self._range_offset = range_offset
self._range_size = range_size
self._current_offset = 0 |
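A sketch of the intended call order for SetRange() above; the surrounding data-range file object, its constructor and its Open()/read() methods are assumptions suggested by the docstring (mapping a partition inside a disk image), not shown in this row:
data_range = DataRange(parent_file_object)  # placeholder constructor
data_range.SetRange(range_offset=512, range_size=4096)
data_range.Open()                           # SetRange() must be called before opening
header = data_range.read(16)                # reads bytes 512..527 of the parent object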
def checkbox_check(self, force_check=False):
"""
Wrapper to check a checkbox
"""
if not self.get_attribute('checked'):
self.click(force_click=force_check) | Wrapper to check a checkbox | Below is the instruction that describes the task:
### Input:
Wrapper to check a checkbox
### Response:
def checkbox_check(self, force_check=False):
"""
Wrapper to check a checkbox
"""
if not self.get_attribute('checked'):
self.click(force_click=force_check) |
def release_port(self, port):
"""release port"""
if port in self.__closed:
self.__closed.remove(port)
self.__ports.add(port) | release port | Below is the instruction that describes the task:
### Input:
release port
### Response:
def release_port(self, port):
"""release port"""
if port in self.__closed:
self.__closed.remove(port)
self.__ports.add(port) |
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
"""Returns the scope at which a message was enabled/disabled."""
if self.config.confidence and confidence.name not in self.config.confidence:
return MSG_STATE_CONFIDENCE
try:
if line in self.file_state._module_msgs_state[msgid]:
return MSG_STATE_SCOPE_MODULE
except (KeyError, TypeError):
return MSG_STATE_SCOPE_CONFIG
return None | Returns the scope at which a message was enabled/disabled. | Below is the instruction that describes the task:
### Input:
Returns the scope at which a message was enabled/disabled.
### Response:
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
"""Returns the scope at which a message was enabled/disabled."""
if self.config.confidence and confidence.name not in self.config.confidence:
return MSG_STATE_CONFIDENCE
try:
if line in self.file_state._module_msgs_state[msgid]:
return MSG_STATE_SCOPE_MODULE
except (KeyError, TypeError):
return MSG_STATE_SCOPE_CONFIG
return None |
def load_retaildata():
"""Monthly retail trade data from census.gov."""
# full = 'https://www.census.gov/retail/mrts/www/mrtssales92-present.xls'
# indiv = 'https://www.census.gov/retail/marts/www/timeseries.html'
db = {
"Auto, other Motor Vehicle": "https://www.census.gov/retail/marts/www/adv441x0.txt",
"Building Material and Garden Equipment and Supplies Dealers": "https://www.census.gov/retail/marts/www/adv44400.txt",
"Clothing and Clothing Accessories Stores": "https://www.census.gov/retail/marts/www/adv44800.txt",
"Dept. Stores (ex. leased depts)": "https://www.census.gov/retail/marts/www/adv45210.txt",
"Electronics and Appliance Stores": "https://www.census.gov/retail/marts/www/adv44300.txt",
"Food Services and Drinking Places": "https://www.census.gov/retail/marts/www/adv72200.txt",
"Food and Beverage Stores": "https://www.census.gov/retail/marts/www/adv44500.txt",
"Furniture and Home Furnishings Stores": "https://www.census.gov/retail/marts/www/adv44200.txt",
"Gasoline Stations": "https://www.census.gov/retail/marts/www/adv44700.txt",
"General Merchandise Stores": "https://www.census.gov/retail/marts/www/adv45200.txt",
"Grocery Stores": "https://www.census.gov/retail/marts/www/adv44510.txt",
"Health and Personal Care Stores": "https://www.census.gov/retail/marts/www/adv44600.txt",
"Miscellaneous Store Retailers": "https://www.census.gov/retail/marts/www/adv45300.txt",
"Motor Vehicle and Parts Dealers": "https://www.census.gov/retail/marts/www/adv44100.txt",
"Nonstore Retailers": "https://www.census.gov/retail/marts/www/adv45400.txt",
"Retail and Food Services, total": "https://www.census.gov/retail/marts/www/adv44x72.txt",
"Retail, total": "https://www.census.gov/retail/marts/www/adv44000.txt",
"Sporting Goods, Hobby, Book, and Music Stores": "https://www.census.gov/retail/marts/www/adv45100.txt",
"Total (excl. Motor Vehicle)": "https://www.census.gov/retail/marts/www/adv44y72.txt",
"Retail (excl. Motor Vehicle and Parts Dealers)": "https://www.census.gov/retail/marts/www/adv4400a.txt",
}
dct = {}
for key, value in db.items():
data = pd.read_csv(
value,
skiprows=5,
skip_blank_lines=True,
header=None,
sep="\s+",
index_col=0,
)
try:
cut = data.index.get_loc("SEASONAL")
except KeyError:
cut = data.index.get_loc("NO")
data = data.iloc[:cut]
data = data.apply(lambda col: pd.to_numeric(col, downcast="float"))
data = data.stack()
year = data.index.get_level_values(0)
month = data.index.get_level_values(1)
idx = pd.to_datetime(
{"year": year, "month": month, "day": 1}
) + offsets.MonthEnd(1)
data.index = idx
data.name = key
dct[key] = data
sales = pd.DataFrame(dct)
sales = sales.reindex(
pd.date_range(sales.index[0], sales.index[-1], freq="M")
)
# TODO: account for any skipped months; could specify a DateOffset to
# `freq` param of `pandas.DataFrame.shift`
yoy = sales.pct_change(periods=12)
return sales, yoy | Monthly retail trade data from census.gov. | Below is the instruction that describes the task:
### Input:
Monthly retail trade data from census.gov.
### Response:
def load_retaildata():
"""Monthly retail trade data from census.gov."""
# full = 'https://www.census.gov/retail/mrts/www/mrtssales92-present.xls'
# indiv = 'https://www.census.gov/retail/marts/www/timeseries.html'
db = {
"Auto, other Motor Vehicle": "https://www.census.gov/retail/marts/www/adv441x0.txt",
"Building Material and Garden Equipment and Supplies Dealers": "https://www.census.gov/retail/marts/www/adv44400.txt",
"Clothing and Clothing Accessories Stores": "https://www.census.gov/retail/marts/www/adv44800.txt",
"Dept. Stores (ex. leased depts)": "https://www.census.gov/retail/marts/www/adv45210.txt",
"Electronics and Appliance Stores": "https://www.census.gov/retail/marts/www/adv44300.txt",
"Food Services and Drinking Places": "https://www.census.gov/retail/marts/www/adv72200.txt",
"Food and Beverage Stores": "https://www.census.gov/retail/marts/www/adv44500.txt",
"Furniture and Home Furnishings Stores": "https://www.census.gov/retail/marts/www/adv44200.txt",
"Gasoline Stations": "https://www.census.gov/retail/marts/www/adv44700.txt",
"General Merchandise Stores": "https://www.census.gov/retail/marts/www/adv45200.txt",
"Grocery Stores": "https://www.census.gov/retail/marts/www/adv44510.txt",
"Health and Personal Care Stores": "https://www.census.gov/retail/marts/www/adv44600.txt",
"Miscellaneous Store Retailers": "https://www.census.gov/retail/marts/www/adv45300.txt",
"Motor Vehicle and Parts Dealers": "https://www.census.gov/retail/marts/www/adv44100.txt",
"Nonstore Retailers": "https://www.census.gov/retail/marts/www/adv45400.txt",
"Retail and Food Services, total": "https://www.census.gov/retail/marts/www/adv44x72.txt",
"Retail, total": "https://www.census.gov/retail/marts/www/adv44000.txt",
"Sporting Goods, Hobby, Book, and Music Stores": "https://www.census.gov/retail/marts/www/adv45100.txt",
"Total (excl. Motor Vehicle)": "https://www.census.gov/retail/marts/www/adv44y72.txt",
"Retail (excl. Motor Vehicle and Parts Dealers)": "https://www.census.gov/retail/marts/www/adv4400a.txt",
}
dct = {}
for key, value in db.items():
data = pd.read_csv(
value,
skiprows=5,
skip_blank_lines=True,
header=None,
sep="\s+",
index_col=0,
)
try:
cut = data.index.get_loc("SEASONAL")
except KeyError:
cut = data.index.get_loc("NO")
data = data.iloc[:cut]
data = data.apply(lambda col: pd.to_numeric(col, downcast="float"))
data = data.stack()
year = data.index.get_level_values(0)
month = data.index.get_level_values(1)
idx = pd.to_datetime(
{"year": year, "month": month, "day": 1}
) + offsets.MonthEnd(1)
data.index = idx
data.name = key
dct[key] = data
sales = pd.DataFrame(dct)
sales = sales.reindex(
pd.date_range(sales.index[0], sales.index[-1], freq="M")
)
# TODO: account for any skipped months; could specify a DateOffset to
# `freq` param of `pandas.DataFrame.shift`
yoy = sales.pct_change(periods=12)
return sales, yoy |
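A brief sketch of consuming the two frames returned above; the column name is one of the keys hard-coded in the function, and live access to census.gov is required:
sales, yoy = load_retaildata()
series = "Retail, total"            # one of the db keys defined above
print(sales[series].tail())         # month-end levels
print(yoy[series].dropna().tail())  # year-over-year percent change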
def _report_container_spec_metrics(self, pod_list, instance_tags):
"""Reports pod requests & limits by looking at pod specs."""
for pod in pod_list['items']:
pod_name = pod.get('metadata', {}).get('name')
pod_phase = pod.get('status', {}).get('phase')
if self._should_ignore_pod(pod_name, pod_phase):
continue
for ctr in pod['spec']['containers']:
if not ctr.get('resources'):
continue
c_name = ctr.get('name', '')
cid = None
for ctr_status in pod['status'].get('containerStatuses', []):
if ctr_status.get('name') == c_name:
# it is already prefixed with 'runtime://'
cid = ctr_status.get('containerID')
break
if not cid:
continue
pod_uid = pod.get('metadata', {}).get('uid')
if self.pod_list_utils.is_excluded(cid, pod_uid):
continue
tags = tagger.tag('%s' % cid, tagger.HIGH) + instance_tags
try:
for resource, value_str in iteritems(ctr.get('resources', {}).get('requests', {})):
value = self.parse_quantity(value_str)
self.gauge('{}.{}.requests'.format(self.NAMESPACE, resource), value, tags)
except (KeyError, AttributeError) as e:
self.log.debug("Unable to retrieve container requests for %s: %s", c_name, e)
try:
for resource, value_str in iteritems(ctr.get('resources', {}).get('limits', {})):
value = self.parse_quantity(value_str)
self.gauge('{}.{}.limits'.format(self.NAMESPACE, resource), value, tags)
except (KeyError, AttributeError) as e:
self.log.debug("Unable to retrieve container limits for %s: %s", c_name, e) | Reports pod requests & limits by looking at pod specs. | Below is the the instruction that describes the task:
### Input:
Reports pod requests & limits by looking at pod specs.
### Response:
def _report_container_spec_metrics(self, pod_list, instance_tags):
"""Reports pod requests & limits by looking at pod specs."""
for pod in pod_list['items']:
pod_name = pod.get('metadata', {}).get('name')
pod_phase = pod.get('status', {}).get('phase')
if self._should_ignore_pod(pod_name, pod_phase):
continue
for ctr in pod['spec']['containers']:
if not ctr.get('resources'):
continue
c_name = ctr.get('name', '')
cid = None
for ctr_status in pod['status'].get('containerStatuses', []):
if ctr_status.get('name') == c_name:
# it is already prefixed with 'runtime://'
cid = ctr_status.get('containerID')
break
if not cid:
continue
pod_uid = pod.get('metadata', {}).get('uid')
if self.pod_list_utils.is_excluded(cid, pod_uid):
continue
tags = tagger.tag('%s' % cid, tagger.HIGH) + instance_tags
try:
for resource, value_str in iteritems(ctr.get('resources', {}).get('requests', {})):
value = self.parse_quantity(value_str)
self.gauge('{}.{}.requests'.format(self.NAMESPACE, resource), value, tags)
except (KeyError, AttributeError) as e:
self.log.debug("Unable to retrieve container requests for %s: %s", c_name, e)
try:
for resource, value_str in iteritems(ctr.get('resources', {}).get('limits', {})):
value = self.parse_quantity(value_str)
self.gauge('{}.{}.limits'.format(self.NAMESPACE, resource), value, tags)
except (KeyError, AttributeError) as e:
self.log.debug("Unable to retrieve container limits for %s: %s", c_name, e) |
def fix2real(uval, conv):
"""
Convert a 32 bit unsigned int register into the value it represents in its Fixed arithmetic form.
@param uval: the numeric unsigned value in simulink representation
@param conv: conv structure with conversion specs as generated by I{get_conv}
@return: the real number represented by the Fixed arithmetic defined in conv
@todo: Better error detection and management of unsupported operations and arguments
"""
res = 0
int_val = ((uval & conv["int_mask"]) >> conv["bin_point"])
dec_val = conv["dec_step"] * (uval & conv["dec_mask"])
if conv["signed"] and (uval & conv["sign_mask"] > 0):
res = conv["int_min"] + int_val + dec_val
else:
res = int_val + dec_val
return (res / conv["scaling"]) | Convert a 32 bit unsigned int register into the value it represents in its Fixed arithmetic form.
@param uval: the numeric unsigned value in simulink representation
@param conv: conv structure with conversion specs as generated by I{get_conv}
@return: the real number represented by the Fixed arithmetic defined in conv
@todo: Better error detection and management of unsupported operations and arguments | Below is the instruction that describes the task:
### Input:
Convert a 32 bit unsigned int register into the value it represents in its Fixed arithmetic form.
@param uval: the numeric unsigned value in simulink representation
@param conv: conv structure with conversion specs as generated by I{get_conv}
@return: the real number represented by the Fixed arithmetic defined in conv
@todo: Better error detection and management of unsupported operations and arguments
### Response:
def fix2real(uval, conv):
"""
Convert a 32 bit unsigned int register into the value it represents in its Fixed arithmetic form.
@param uval: the numeric unsigned value in simulink representation
@param conv: conv structure with conversion specs as generated by I{get_conv}
@return: the real number represented by the Fixed arithmetic defined in conv
@todo: Better error detection and management of unsupported operations and arguments
"""
res = 0
int_val = ((uval & conv["int_mask"]) >> conv["bin_point"])
dec_val = conv["dec_step"] * (uval & conv["dec_mask"])
if conv["signed"] and (uval & conv["sign_mask"] > 0):
res = conv["int_min"] + int_val + dec_val
else:
res = int_val + dec_val
return (res / conv["scaling"]) |
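A worked sketch for fix2real() above; the conv dictionary is built by hand here because get_conv() is not shown in this row, with field names matching the ones the function reads:
# Hand-built conv dict (normally produced by get_conv()); it describes an
# unsigned 16-bit register with 8 fractional bits (UQ8.8) and unit scaling.
conv = {
    "signed": False,
    "bin_point": 8,         # number of fractional bits
    "int_mask": 0xFF00,     # integer-part bits
    "dec_mask": 0x00FF,     # fractional-part bits
    "sign_mask": 0x0000,    # unused for an unsigned format
    "dec_step": 1.0 / 256,  # weight of one fractional LSB
    "int_min": 0,
    "scaling": 1.0,
}
print(fix2real(0x0180, conv))  # 0x01 + 0x80/256 -> 1.5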
def drawDisplay( self, painter, option, rect, text ):
"""
Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str>
"""
painter.setBrush(Qt.NoBrush)
painter.drawText(rect.left() + 3,
rect.top(),
rect.width() - 3,
rect.height(),
option.displayAlignment,
text) | Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str> | Below is the instruction that describes the task:
### Input:
Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str>
### Response:
def drawDisplay( self, painter, option, rect, text ):
"""
Handles the display drawing for this delegate.
:param painter | <QPainter>
option | <QStyleOption>
rect | <QRect>
text | <str>
"""
painter.setBrush(Qt.NoBrush)
painter.drawText(rect.left() + 3,
rect.top(),
rect.width() - 3,
rect.height(),
option.displayAlignment,
text) |
def composition_prediction(self, composition, to_this_composition=True):
"""
Returns charge-balanced substitutions from a starting or ending
composition.
Args:
composition:
starting or ending composition
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
"""
preds = self.list_prediction(list(composition.keys()),
to_this_composition)
output = []
for p in preds:
if to_this_composition:
subs = {v: k for k, v in p['substitutions'].items()}
else:
subs = p['substitutions']
charge = 0
for k, v in composition.items():
charge += subs[k].oxi_state * v
if abs(charge) < 1e-8:
output.append(p)
logging.info('{} charge balanced substitutions found'
.format(len(output)))
return output | Returns charge-balanced substitutions from a starting or ending
composition.
Args:
composition:
starting or ending composition
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list. | Below is the instruction that describes the task:
### Input:
Returns charge-balanced substitutions from a starting or ending
composition.
Args:
composition:
starting or ending composition
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
### Response:
def composition_prediction(self, composition, to_this_composition=True):
"""
Returns charge-balanced substitutions from a starting or ending
composition.
Args:
composition:
starting or ending composition
to_this_composition:
If true, substitutions with this as a final composition
will be found. If false, substitutions with this as a
starting composition will be found (these are slightly
different)
Returns:
List of predictions in the form of dictionaries.
If to_this_composition is true, the values of the dictionary
will be from the list species. If false, the keys will be
from that list.
"""
preds = self.list_prediction(list(composition.keys()),
to_this_composition)
output = []
for p in preds:
if to_this_composition:
subs = {v: k for k, v in p['substitutions'].items()}
else:
subs = p['substitutions']
charge = 0
for k, v in composition.items():
charge += subs[k].oxi_state * v
if abs(charge) < 1e-8:
output.append(p)
logging.info('{} charge balanced substitutions found'
.format(len(output)))
return output |
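A short sketch of driving composition_prediction() above; the SubstitutionPredictor class name, its threshold argument and the pymatgen import path are assumptions drawn from the conventions this method follows, and the composition is given as oxidation-state-decorated species so the charge-balance check can read oxi_state:
from pymatgen.core import Composition

predictor = SubstitutionPredictor(threshold=1e-3)       # assumed owning class
target = Composition({"Ba2+": 1, "Ti4+": 1, "O2-": 3})  # species with charges
preds = predictor.composition_prediction(target, to_this_composition=True)
for p in preds[:3]:
    print(p.get("probability"), p["substitutions"])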
def add_patch(self, *args, **kwargs):
"""
Shortcut for add_route with method PATCH
"""
return self.add_route(hdrs.METH_PATCH, *args, **kwargs) | Shortcut for add_route with method PATCH | Below is the instruction that describes the task:
### Input:
Shortcut for add_route with method PATCH
### Response:
def add_patch(self, *args, **kwargs):
"""
Shortcut for add_route with method PATCH
"""
return self.add_route(hdrs.METH_PATCH, *args, **kwargs) |
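A usage sketch for add_patch() above, assuming the aiohttp-style router that the hdrs.METH_PATCH constant points to; the route and handler are placeholders:
from aiohttp import web

async def patch_item(request):
    return web.json_response({"id": request.match_info["id"], "patched": True})

app = web.Application()
app.router.add_patch("/items/{id}", patch_item)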
def streaming_to_client():
"""Puts the client logger into streaming mode, which sends
unbuffered input through to the socket one character at a time.
We also disable propagation so the root logger does not
receive many one-byte emissions. This context handler
was originally created for streaming Compose up's
terminal output through to the client and should only be
used for similarly complex circumstances."""
for handler in client_logger.handlers:
if hasattr(handler, 'append_newlines'):
break
else:
handler = None
old_propagate = client_logger.propagate
client_logger.propagate = False
if handler is not None:
old_append = handler.append_newlines
handler.append_newlines = False
yield
client_logger.propagate = old_propagate
if handler is not None:
handler.append_newlines = old_append | Puts the client logger into streaming mode, which sends
unbuffered input through to the socket one character at a time.
We also disable propagation so the root logger does not
receive many one-byte emissions. This context handler
was originally created for streaming Compose up's
terminal output through to the client and should only be
used for similarly complex circumstances. | Below is the the instruction that describes the task:
### Input:
Puts the client logger into streaming mode, which sends
unbuffered input through to the socket one character at a time.
We also disable propagation so the root logger does not
receive many one-byte emissions. This context handler
was originally created for streaming Compose up's
terminal output through to the client and should only be
used for similarly complex circumstances.
### Response:
def streaming_to_client():
"""Puts the client logger into streaming mode, which sends
unbuffered input through to the socket one character at a time.
We also disable propagation so the root logger does not
receive many one-byte emissions. This context handler
was originally created for streaming Compose up's
terminal output through to the client and should only be
used for similarly complex circumstances."""
for handler in client_logger.handlers:
if hasattr(handler, 'append_newlines'):
break
else:
handler = None
old_propagate = client_logger.propagate
client_logger.propagate = False
if handler is not None:
old_append = handler.append_newlines
handler.append_newlines = False
yield
client_logger.propagate = old_propagate
if handler is not None:
handler.append_newlines = old_append |
def set_default_prediction_value(self, values):
"""
Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions.
"""
if type(values) is not list:
values = [float(values)]
self.tree_parameters.numPredictionDimensions = len(values)
for value in values:
self.tree_parameters.basePredictionValue.append(value) | Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions. | Below is the the instruction that describes the task:
### Input:
Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions.
### Response:
def set_default_prediction_value(self, values):
"""
Set the default prediction value(s).
The values given here form the base prediction value that the values
at activated leaves are added to. If values is a scalar, then
the output of the tree must also be 1 dimensional; otherwise, values
must be a list with length matching the dimension of values in the tree.
Parameters
----------
values: [int | double | list[double]]
Default values for predictions.
"""
if type(values) is not list:
values = [float(values)]
self.tree_parameters.numPredictionDimensions = len(values)
for value in values:
self.tree_parameters.basePredictionValue.append(value) |
def format(self):
"""Return the format attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT) | Return the format attribute of the BFD file being processed. | Below is the the instruction that describes the task:
### Input:
Return the format attribute of the BFD file being processed.
### Response:
def format(self):
"""Return the format attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT) |
def process_get(self):
"""
Analyse the GET request
:return:
* :attr:`USER_NOT_AUTHENTICATED` if the user is not authenticated or is requesting
for authentication renewal
* :attr:`USER_AUTHENTICATED` if the user is authenticated and is not requesting
for authentication renewal
:rtype: int
"""
# generate a new LT
self.gen_lt()
if not self.request.session.get("authenticated") or self.renew:
# authentication will be needed, initialize the form to use
self.init_form()
return self.USER_NOT_AUTHENTICATED
return self.USER_AUTHENTICATED | Analyse the GET request
:return:
* :attr:`USER_NOT_AUTHENTICATED` if the user is not authenticated or is requesting
for authentication renewal
* :attr:`USER_AUTHENTICATED` if the user is authenticated and is not requesting
for authentication renewal
:rtype: int | Below is the the instruction that describes the task:
### Input:
Analyse the GET request
:return:
* :attr:`USER_NOT_AUTHENTICATED` if the user is not authenticated or is requesting
for authentication renewal
* :attr:`USER_AUTHENTICATED` if the user is authenticated and is not requesting
for authentication renewal
:rtype: int
### Response:
def process_get(self):
"""
Analyse the GET request
:return:
* :attr:`USER_NOT_AUTHENTICATED` if the user is not authenticated or is requesting
for authentication renewal
* :attr:`USER_AUTHENTICATED` if the user is authenticated and is not requesting
for authentication renewal
:rtype: int
"""
# generate a new LT
self.gen_lt()
if not self.request.session.get("authenticated") or self.renew:
# authentication will be needed, initialize the form to use
self.init_form()
return self.USER_NOT_AUTHENTICATED
return self.USER_AUTHENTICATED |
def add_status_message(self, message, severity="info"):
"""Set a portal message
"""
self.context.plone_utils.addPortalMessage(message, severity) | Set a portal message | Below is the the instruction that describes the task:
### Input:
Set a portal message
### Response:
def add_status_message(self, message, severity="info"):
"""Set a portal message
"""
self.context.plone_utils.addPortalMessage(message, severity) |
def get_stp_mst_detail_output_cist_cist_reg_root_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
cist_reg_root_id = ET.SubElement(cist, "cist-reg-root-id")
cist_reg_root_id.text = kwargs.pop('cist_reg_root_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_cist_cist_reg_root_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
cist_reg_root_id = ET.SubElement(cist, "cist-reg-root-id")
cist_reg_root_id.text = kwargs.pop('cist_reg_root_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def ip_unnumbered(self, **kwargs):
"""Configure an unnumbered interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
            delete (bool): True if the IP address is to be deleted, False if it
                is to be added (True, False). Default value will be False if not
specified.
donor_type (str): Interface type of the donor interface.
donor_name (str): Interface name of the donor interface.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `donor_type`, or `donor_name` is
not passed.
ValueError: if `int_type`, `name`, `donor_type`, or `donor_name`
are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230')
... int_type = 'tengigabitethernet'
... name = '230/0/20'
... donor_type = 'loopback'
... donor_name = '1'
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... get=True)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... delete=True)
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230',
... delete=True)
... output = dev.interface.ip_unnumbered(int_type='hodor',
... donor_name=donor_name, donor_type=donor_type, name=name)
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError
"""
kwargs['ip_donor_interface_name'] = kwargs.pop('donor_name')
kwargs['ip_donor_interface_type'] = kwargs.pop('donor_type')
kwargs['delete'] = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet']
if kwargs['int_type'] not in valid_int_types:
raise ValueError('int_type must be one of: %s' %
repr(valid_int_types))
unnumbered_type = self._ip_unnumbered_type(**kwargs)
unnumbered_name = self._ip_unnumbered_name(**kwargs)
if kwargs.pop('get', False):
return self._get_ip_unnumbered(unnumbered_type, unnumbered_name)
config = pynos.utilities.merge_xml(unnumbered_type, unnumbered_name)
return callback(config) | Configure an unnumbered interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
            delete (bool): True if the IP address is to be deleted, False if it
                is to be added (True, False). Default value will be False if not
specified.
donor_type (str): Interface type of the donor interface.
donor_name (str): Interface name of the donor interface.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `donor_type`, or `donor_name` is
not passed.
ValueError: if `int_type`, `name`, `donor_type`, or `donor_name`
are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230')
... int_type = 'tengigabitethernet'
... name = '230/0/20'
... donor_type = 'loopback'
... donor_name = '1'
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... get=True)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... delete=True)
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230',
... delete=True)
... output = dev.interface.ip_unnumbered(int_type='hodor',
... donor_name=donor_name, donor_type=donor_type, name=name)
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError | Below is the the instruction that describes the task:
### Input:
Configure an unnumbered interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
            delete (bool): True if the IP address is to be deleted, False if it
                is to be added (True, False). Default value will be False if not
specified.
donor_type (str): Interface type of the donor interface.
donor_name (str): Interface name of the donor interface.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `donor_type`, or `donor_name` is
not passed.
ValueError: if `int_type`, `name`, `donor_type`, or `donor_name`
are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230')
... int_type = 'tengigabitethernet'
... name = '230/0/20'
... donor_type = 'loopback'
... donor_name = '1'
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... get=True)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... delete=True)
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230',
... delete=True)
... output = dev.interface.ip_unnumbered(int_type='hodor',
... donor_name=donor_name, donor_type=donor_type, name=name)
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError
### Response:
def ip_unnumbered(self, **kwargs):
"""Configure an unnumbered interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
        delete (bool): True if the IP address is to be deleted, False if it
            is to be added (True, False). Default value will be False if not
specified.
donor_type (str): Interface type of the donor interface.
donor_name (str): Interface name of the donor interface.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `donor_type`, or `donor_name` is
not passed.
ValueError: if `int_type`, `name`, `donor_type`, or `donor_name`
are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230')
... int_type = 'tengigabitethernet'
... name = '230/0/20'
... donor_type = 'loopback'
... donor_name = '1'
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... get=True)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... delete=True)
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230',
... delete=True)
... output = dev.interface.ip_unnumbered(int_type='hodor',
... donor_name=donor_name, donor_type=donor_type, name=name)
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError
"""
kwargs['ip_donor_interface_name'] = kwargs.pop('donor_name')
kwargs['ip_donor_interface_type'] = kwargs.pop('donor_type')
kwargs['delete'] = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet']
if kwargs['int_type'] not in valid_int_types:
raise ValueError('int_type must be one of: %s' %
repr(valid_int_types))
unnumbered_type = self._ip_unnumbered_type(**kwargs)
unnumbered_name = self._ip_unnumbered_name(**kwargs)
if kwargs.pop('get', False):
return self._get_ip_unnumbered(unnumbered_type, unnumbered_name)
config = pynos.utilities.merge_xml(unnumbered_type, unnumbered_name)
return callback(config) |
def plot_site(fignum, SiteRec, data, key):
"""
deprecated (used in ipmag)
"""
print('Site mean data: ')
print(' dec inc n_lines n_planes kappa R alpha_95 comp coord')
print(SiteRec['site_dec'], SiteRec['site_inc'], SiteRec['site_n_lines'], SiteRec['site_n_planes'], SiteRec['site_k'],
SiteRec['site_r'], SiteRec['site_alpha95'], SiteRec['site_comp_name'], SiteRec['site_tilt_correction'])
print('sample/specimen, dec, inc, n_specs/a95,| method codes ')
for i in range(len(data)):
print('%s: %s %s %s / %s | %s' % (data[i]['er_' + key + '_name'], data[i][key + '_dec'], data[i]
[key + '_inc'], data[i][key + '_n'], data[i][key + '_alpha95'], data[i]['magic_method_codes']))
plot_slnp(fignum, SiteRec, data, key)
plot = input("s[a]ve plot, [q]uit or <return> to continue: ")
if plot == 'q':
print("CUL8R")
sys.exit()
if plot == 'a':
files = {}
for key in list(EQ.keys()):
files[key] = site + '_' + key + '.' + fmt
save_plots(EQ, files) | deprecated (used in ipmag) | Below is the the instruction that describes the task:
### Input:
deprecated (used in ipmag)
### Response:
def plot_site(fignum, SiteRec, data, key):
"""
deprecated (used in ipmag)
"""
print('Site mean data: ')
print(' dec inc n_lines n_planes kappa R alpha_95 comp coord')
print(SiteRec['site_dec'], SiteRec['site_inc'], SiteRec['site_n_lines'], SiteRec['site_n_planes'], SiteRec['site_k'],
SiteRec['site_r'], SiteRec['site_alpha95'], SiteRec['site_comp_name'], SiteRec['site_tilt_correction'])
print('sample/specimen, dec, inc, n_specs/a95,| method codes ')
for i in range(len(data)):
print('%s: %s %s %s / %s | %s' % (data[i]['er_' + key + '_name'], data[i][key + '_dec'], data[i]
[key + '_inc'], data[i][key + '_n'], data[i][key + '_alpha95'], data[i]['magic_method_codes']))
plot_slnp(fignum, SiteRec, data, key)
plot = input("s[a]ve plot, [q]uit or <return> to continue: ")
if plot == 'q':
print("CUL8R")
sys.exit()
if plot == 'a':
files = {}
for key in list(EQ.keys()):
files[key] = site + '_' + key + '.' + fmt
save_plots(EQ, files) |
def start(io_loop=None, check_time=2):
"""Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
io_loop = io_loop or asyncio.get_event_loop()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
logger.warning("aiohttp_autoreload started more than once in the same process")
# if _has_execv:
# add_reload_hook(functools.partial(io_loop.close, all_fds=True))
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
logger.debug("Starting periodic checks for code changes")
call_periodic(check_time, callback, loop=io_loop) | Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated. | Below is the the instruction that describes the task:
### Input:
Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
### Response:
def start(io_loop=None, check_time=2):
"""Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
io_loop = io_loop or asyncio.get_event_loop()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
logger.warning("aiohttp_autoreload started more than once in the same process")
# if _has_execv:
# add_reload_hook(functools.partial(io_loop.close, all_fds=True))
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
logger.debug("Starting periodic checks for code changes")
call_periodic(check_time, callback, loop=io_loop) |
def get_spec(self):
"""
Return the Core ML spec
"""
if _mac_ver() >= (10, 14):
return self.vggish_model.get_spec()
else:
vggish_model_file = VGGish()
coreml_model_path = vggish_model_file.get_model_path(format='coreml')
return MLModel(coreml_model_path).get_spec() | Return the Core ML spec | Below is the the instruction that describes the task:
### Input:
Return the Core ML spec
### Response:
def get_spec(self):
"""
Return the Core ML spec
"""
if _mac_ver() >= (10, 14):
return self.vggish_model.get_spec()
else:
vggish_model_file = VGGish()
coreml_model_path = vggish_model_file.get_model_path(format='coreml')
return MLModel(coreml_model_path).get_spec() |
def _split_string_to_tokens(text):
"""Splits text to a list of string tokens."""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in xrange(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret | Splits text to a list of string tokens. | Below is the the instruction that describes the task:
### Input:
Splits text to a list of string tokens.
### Response:
def _split_string_to_tokens(text):
"""Splits text to a list of string tokens."""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in xrange(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret |
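A runnable sketch of the alternating alphanumeric/non-alphanumeric split performed above, with outputs traced by hand. The ASCII-only _ALPHANUMERIC_CHAR_SET and Python 3 range are assumptions made so the example is self-contained; the original module builds the set from Unicode character categories and uses xrange.
import string

_ALPHANUMERIC_CHAR_SET = set(string.ascii_letters + string.digits)  # assumption: ASCII subset only

def split_string_to_tokens(text):
    # Same alternation logic as the function above.
    if not text:
        return []
    ret, token_start = [], 0
    is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
    for pos in range(1, len(text)):
        if is_alnum[pos] != is_alnum[pos - 1]:
            token = text[token_start:pos]
            if token != " " or token_start == 0:
                ret.append(token)
            token_start = pos
    ret.append(text[token_start:])
    return ret

print(split_string_to_tokens("hello, world"))  # ['hello', ', ', 'world']
print(split_string_to_tokens("a b c"))         # ['a', 'b', 'c'] -- single spaces between tokens are dropped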
def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime.datetime:
"""
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
"""
blob_obj = self._get_blob_obj(bucket, key)
return blob_obj.time_created | Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date | Below is the the instruction that describes the task:
### Input:
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
### Response:
def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime.datetime:
"""
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
"""
blob_obj = self._get_blob_obj(bucket, key)
return blob_obj.time_created |
def get_rendered_objects(self):
"""Render objects"""
objects = self.objects
if isinstance(objects, str):
objects = getattr(self.object, objects).all()
return [
self.get_rendered_object(obj)
for obj in objects
] | Render objects | Below is the the instruction that describes the task:
### Input:
Render objects
### Response:
def get_rendered_objects(self):
"""Render objects"""
objects = self.objects
if isinstance(objects, str):
objects = getattr(self.object, objects).all()
return [
self.get_rendered_object(obj)
for obj in objects
] |
def mutex(self, mutex, **kwargs):
"""Add Mutex data to Batch object.
Args:
mutex (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Mutex.
"""
indicator_obj = Mutex(mutex, **kwargs)
return self._indicator(indicator_obj) | Add Mutex data to Batch object.
Args:
mutex (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Mutex. | Below is the the instruction that describes the task:
### Input:
Add Mutex data to Batch object.
Args:
mutex (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Mutex.
### Response:
def mutex(self, mutex, **kwargs):
"""Add Mutex data to Batch object.
Args:
mutex (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Mutex.
"""
indicator_obj = Mutex(mutex, **kwargs)
return self._indicator(indicator_obj) |
def dlogpdf_dlink_dvar(self, inv_link_f, y, Y_metadata=None):
"""
Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)
.. math::
        \\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-v(v + 1)(y_{i}-\lambda(f_{i}))}{((y_{i}-\lambda(f_{i}))^2 + \\sigma^2 v)^2}
:param inv_link_f: latent variables inv_link_f
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: Nx1 array
"""
e = y - inv_link_f
dlogpdf_dlink_dvar = (self.v*(self.v+1)*(-e))/((self.sigma2*self.v + e**2)**2)
return dlogpdf_dlink_dvar | Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)
.. math::
        \\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-v(v + 1)(y_{i}-\lambda(f_{i}))}{((y_{i}-\lambda(f_{i}))^2 + \\sigma^2 v)^2}
:param inv_link_f: latent variables inv_link_f
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: Nx1 array | Below is the the instruction that describes the task:
### Input:
Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)
.. math::
        \\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-v(v + 1)(y_{i}-\lambda(f_{i}))}{((y_{i}-\lambda(f_{i}))^2 + \\sigma^2 v)^2}
:param inv_link_f: latent variables inv_link_f
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: Nx1 array
### Response:
def dlogpdf_dlink_dvar(self, inv_link_f, y, Y_metadata=None):
"""
Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)
.. math::
        \\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-v(v + 1)(y_{i}-\lambda(f_{i}))}{((y_{i}-\lambda(f_{i}))^2 + \\sigma^2 v)^2}
:param inv_link_f: latent variables inv_link_f
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: Nx1 array
"""
e = y - inv_link_f
dlogpdf_dlink_dvar = (self.v*(self.v+1)*(-e))/((self.sigma2*self.v + e**2)**2)
return dlogpdf_dlink_dvar |
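A quick consistency check of what the return line computes, assuming (an assumption, since it is not shown here) that the quantity being differentiated is the companion gradient dlogpdf_dlink = (v+1)e / (\sigma^2 v + e^2) with e = y - \lambda(f):
\frac{\partial}{\partial\sigma^{2}}\left[\frac{(v+1)\,e}{\sigma^{2}v + e^{2}}\right] = \frac{-v(v+1)\,e}{(\sigma^{2}v + e^{2})^{2}}
There is no extra 2\sigma factor because the derivative is taken with respect to \sigma^2 rather than \sigma.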
def get_port_profile_for_intf_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_port_profile_for_intf_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_cs_archs(self):
''' capstone disassembler '''
cs_archs = {
'x16': (CS_ARCH_X86, CS_MODE_16),
'x86': (CS_ARCH_X86, CS_MODE_32),
'x64': (CS_ARCH_X86, CS_MODE_64),
'arm': (CS_ARCH_ARM, CS_MODE_ARM),
'arm_t': (CS_ARCH_ARM, CS_MODE_THUMB),
'arm64': (CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN),
'mips32': (CS_ARCH_MIPS, CS_MODE_MIPS32),
'mips64': (CS_ARCH_MIPS, CS_MODE_MIPS64),
}
return cs_archs | capstone disassembler | Below is the the instruction that describes the task:
### Input:
capstone disassembler
### Response:
def get_cs_archs(self):
''' capstone disassembler '''
cs_archs = {
'x16': (CS_ARCH_X86, CS_MODE_16),
'x86': (CS_ARCH_X86, CS_MODE_32),
'x64': (CS_ARCH_X86, CS_MODE_64),
'arm': (CS_ARCH_ARM, CS_MODE_ARM),
'arm_t': (CS_ARCH_ARM, CS_MODE_THUMB),
'arm64': (CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN),
'mips32': (CS_ARCH_MIPS, CS_MODE_MIPS32),
'mips64': (CS_ARCH_MIPS, CS_MODE_MIPS64),
}
return cs_archs |
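A short sketch of how an (arch, mode) pair from such a table is typically fed to Capstone; the byte string is an arbitrary x86-64 prologue chosen for the example:
from capstone import Cs, CS_ARCH_X86, CS_MODE_64

arch, mode = CS_ARCH_X86, CS_MODE_64        # what cs_archs['x64'] would provide
md = Cs(arch, mode)
for insn in md.disasm(b"\x55\x48\x89\xe5", 0x1000):
    print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
# 0x1000: push  rbp
# 0x1001: mov   rbp, rsp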
def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False | Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False. | Below is the the instruction that describes the task:
### Input:
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
### Response:
def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False |
def get_string(self):
"""A string representation of the junction
    :return: string representation
:rtype: string
"""
return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start) | A string representation of the junction
    :return: string representation
:rtype: string | Below is the the instruction that describes the task:
### Input:
A string representation of the junction
    :return: string representation
:rtype: string
### Response:
def get_string(self):
"""A string representation of the junction
    :return: string representation
:rtype: string
"""
return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start) |
def display(surface):
"""Displays a pygame.Surface in the window.
in pygame the window is represented through a surface, on which you can draw
    as on any other pygame.Surface. A reference to the screen can be obtained
via the :py:func:`pygame.display.get_surface` function. To display the
contents of the screen surface in the window :py:func:`pygame.display.flip`
needs to be called.
    :py:func:`display` draws the surface onto the screen surface at the position
(0, 0), and then calls :py:func:`flip`.
:param surface: the pygame.Surface to display
:type surface: pygame.Surface
"""
screen = pygame.display.get_surface()
screen.blit(surface, (0, 0))
pygame.display.flip() | Displays a pygame.Surface in the window.
in pygame the window is represented through a surface, on which you can draw
    as on any other pygame.Surface. A reference to the screen can be obtained
via the :py:func:`pygame.display.get_surface` function. To display the
contents of the screen surface in the window :py:func:`pygame.display.flip`
needs to be called.
    :py:func:`display` draws the surface onto the screen surface at the position
(0, 0), and then calls :py:func:`flip`.
:param surface: the pygame.Surface to display
:type surface: pygame.Surface | Below is the the instruction that describes the task:
### Input:
Displays a pygame.Surface in the window.
in pygame the window is represented through a surface, on which you can draw
    as on any other pygame.Surface. A reference to the screen can be obtained
via the :py:func:`pygame.display.get_surface` function. To display the
contents of the screen surface in the window :py:func:`pygame.display.flip`
needs to be called.
    :py:func:`display` draws the surface onto the screen surface at the position
(0, 0), and then calls :py:func:`flip`.
:param surface: the pygame.Surface to display
:type surface: pygame.Surface
### Response:
def display(surface):
"""Displays a pygame.Surface in the window.
in pygame the window is represented through a surface, on which you can draw
    as on any other pygame.Surface. A reference to the screen can be obtained
via the :py:func:`pygame.display.get_surface` function. To display the
contents of the screen surface in the window :py:func:`pygame.display.flip`
needs to be called.
    :py:func:`display` draws the surface onto the screen surface at the position
(0, 0), and then calls :py:func:`flip`.
:param surface: the pygame.Surface to display
:type surface: pygame.Surface
"""
screen = pygame.display.get_surface()
screen.blit(surface, (0, 0))
pygame.display.flip() |
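A minimal usage sketch, assuming a window has already been created with pygame.display.set_mode; the window size and fill colour are arbitrary choices for the example:
import pygame

pygame.init()
pygame.display.set_mode((320, 240))      # creates the window / screen surface
frame = pygame.Surface((320, 240))
frame.fill((30, 30, 30))                 # draw onto an off-screen surface
display(frame)                           # blits it at (0, 0) and flips the display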
def get_seqstr(config, metadata):
"""
Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names.
"""
seq_abbrs = metadata.get('ScanningSequence', '').split('_')
seqs = [config['seq'].get(seq, seq) for seq in seq_abbrs]
variants = [config['seqvar'].get(var, var) for var in \
metadata.get('SequenceVariant', '').split('_')]
seqs = list_to_str(seqs)
if seq_abbrs[0]:
seqs += ' ({0})'.format(os.path.sep.join(seq_abbrs))
variants = list_to_str(variants)
return seqs, variants | Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names. | Below is the the instruction that describes the task:
### Input:
Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names.
### Response:
def get_seqstr(config, metadata):
"""
Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names.
"""
seq_abbrs = metadata.get('ScanningSequence', '').split('_')
seqs = [config['seq'].get(seq, seq) for seq in seq_abbrs]
variants = [config['seqvar'].get(var, var) for var in \
metadata.get('SequenceVariant', '').split('_')]
seqs = list_to_str(seqs)
if seq_abbrs[0]:
seqs += ' ({0})'.format(os.path.sep.join(seq_abbrs))
variants = list_to_str(variants)
return seqs, variants |
def _default_read_frame(self, *, frame=None, mpkit=None):
"""Read frames with default engine.
- Extract frames and each layer of packets.
- Make Info object out of frame properties.
- Append Info.
- Write plist & append Info.
"""
from pcapkit.toolkit.default import (ipv4_reassembly, ipv6_reassembly,
tcp_reassembly, tcp_traceflow)
# read frame header
if not self._flag_m:
frame = Frame(self._ifile, num=self._frnum+1, proto=self._dlink,
layer=self._exlyr, protocol=self._exptl, nanosecond=self._nnsec)
self._frnum += 1
# verbose output
if self._flag_v:
print(f' - Frame {self._frnum:>3d}: {frame.protochain}')
# write plist
frnum = f'Frame {self._frnum}'
if not self._flag_q:
if self._flag_f:
ofile = self._ofile(f'{self._ofnm}/{frnum}.{self._fext}')
ofile(frame.info, name=frnum)
else:
self._ofile(frame.info, name=frnum)
# record fragments
if self._ipv4:
flag, data = ipv4_reassembly(frame)
if flag:
self._reasm[0](data) # pylint: disable=E1102
if self._ipv6:
flag, data = ipv6_reassembly(frame)
if flag:
self._reasm[1](data) # pylint: disable=E1102
if self._tcp:
flag, data = tcp_reassembly(frame)
if flag:
self._reasm[2](data) # pylint: disable=E1102
# trace flows
if self._flag_t:
flag, data = tcp_traceflow(frame, data_link=self._dlink)
if flag:
self._trace(data)
# record frames
if self._exeng == 'pipeline':
if self._flag_d:
# frame._file = NotImplemented
mpkit.frames[self._frnum] = frame
# print(self._frnum, 'stored')
mpkit.current += 1
elif self._exeng == 'server':
# record frames
if self._flag_d:
# frame._file = NotImplemented
self._frame.append(frame)
# print(self._frnum, 'stored')
self._frnum += 1
else:
if self._flag_d:
self._frame.append(frame)
self._proto = frame.protochain.chain
# return frame record
return frame | Read frames with default engine.
- Extract frames and each layer of packets.
- Make Info object out of frame properties.
- Append Info.
- Write plist & append Info. | Below is the the instruction that describes the task:
### Input:
Read frames with default engine.
- Extract frames and each layer of packets.
- Make Info object out of frame properties.
- Append Info.
- Write plist & append Info.
### Response:
def _default_read_frame(self, *, frame=None, mpkit=None):
"""Read frames with default engine.
- Extract frames and each layer of packets.
- Make Info object out of frame properties.
- Append Info.
- Write plist & append Info.
"""
from pcapkit.toolkit.default import (ipv4_reassembly, ipv6_reassembly,
tcp_reassembly, tcp_traceflow)
# read frame header
if not self._flag_m:
frame = Frame(self._ifile, num=self._frnum+1, proto=self._dlink,
layer=self._exlyr, protocol=self._exptl, nanosecond=self._nnsec)
self._frnum += 1
# verbose output
if self._flag_v:
print(f' - Frame {self._frnum:>3d}: {frame.protochain}')
# write plist
frnum = f'Frame {self._frnum}'
if not self._flag_q:
if self._flag_f:
ofile = self._ofile(f'{self._ofnm}/{frnum}.{self._fext}')
ofile(frame.info, name=frnum)
else:
self._ofile(frame.info, name=frnum)
# record fragments
if self._ipv4:
flag, data = ipv4_reassembly(frame)
if flag:
self._reasm[0](data) # pylint: disable=E1102
if self._ipv6:
flag, data = ipv6_reassembly(frame)
if flag:
self._reasm[1](data) # pylint: disable=E1102
if self._tcp:
flag, data = tcp_reassembly(frame)
if flag:
self._reasm[2](data) # pylint: disable=E1102
# trace flows
if self._flag_t:
flag, data = tcp_traceflow(frame, data_link=self._dlink)
if flag:
self._trace(data)
# record frames
if self._exeng == 'pipeline':
if self._flag_d:
# frame._file = NotImplemented
mpkit.frames[self._frnum] = frame
# print(self._frnum, 'stored')
mpkit.current += 1
elif self._exeng == 'server':
# record frames
if self._flag_d:
# frame._file = NotImplemented
self._frame.append(frame)
# print(self._frnum, 'stored')
self._frnum += 1
else:
if self._flag_d:
self._frame.append(frame)
self._proto = frame.protochain.chain
# return frame record
return frame |
async def profile(self, ctx, tag):
'''Example command for use inside a discord bot cog.'''
if not self.check_valid_tag(tag):
return await ctx.send('Invalid tag!')
profile = await self.cr.get_profile(tag)
em = discord.Embed(color=0x00FFFFF)
em.set_author(name=str(profile), icon_url=profile.clan_badge_url)
em.set_thumbnail(url=profile.arena.badge_url)
# Example of adding data. (Bad)
for attr in self.cdir(profile):
value = getattr(profile, attr)
if not callable(value):
em.add_field(
                name=attr.replace('_', ' ').title(),
value=str(value)
)
await ctx.send(embed=em) | Example command for use inside a discord bot cog. | Below is the the instruction that describes the task:
### Input:
Example command for use inside a discord bot cog.
### Response:
async def profile(self, ctx, tag):
'''Example command for use inside a discord bot cog.'''
if not self.check_valid_tag(tag):
return await ctx.send('Invalid tag!')
profile = await self.cr.get_profile(tag)
em = discord.Embed(color=0x00FFFFF)
em.set_author(name=str(profile), icon_url=profile.clan_badge_url)
em.set_thumbnail(url=profile.arena.badge_url)
# Example of adding data. (Bad)
for attr in self.cdir(profile):
value = getattr(profile, attr)
if not callable(value):
em.add_field(
                name=attr.replace('_', ' ').title(),
value=str(value)
)
await ctx.send(embed=em) |
def get_group_details(group):
""" Get group details. """
result = []
for datastore in _get_datastores():
value = datastore.get_group_details(group)
value['datastore'] = datastore.config['DESCRIPTION']
result.append(value)
return result | Get group details. | Below is the the instruction that describes the task:
### Input:
Get group details.
### Response:
def get_group_details(group):
""" Get group details. """
result = []
for datastore in _get_datastores():
value = datastore.get_group_details(group)
value['datastore'] = datastore.config['DESCRIPTION']
result.append(value)
return result |
def remove(self, resource):
"""Removes a resource from the context"""
if isinstance(resource, Resource):
self._resources.remove(resource) | Removes a resource from the context | Below is the the instruction that describes the task:
### Input:
Removes a resource from the context
### Response:
def remove(self, resource):
"""Removes a resource from the context"""
if isinstance(resource, Resource):
self._resources.remove(resource) |
def _factln(num):
# type: (int) -> float
"""
    Computes logfactorial regularly for tractable numbers, uses Ramanujan's approximation otherwise.
"""
if num < 20:
log_factorial = log(factorial(num))
else:
log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (
1 + 2 * num))) / 6.0 + log(pi) / 2
    return log_factorial | Computes logfactorial regularly for tractable numbers, uses Ramanujan's approximation otherwise. | Below is the the instruction that describes the task:
### Input:
Computes logfactorial regularly for tractable numbers, uses Ramanujan's approximation otherwise.
### Response:
def _factln(num):
# type: (int) -> float
"""
    Computes logfactorial regularly for tractable numbers, uses Ramanujan's approximation otherwise.
"""
if num < 20:
log_factorial = log(factorial(num))
else:
log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (
1 + 2 * num))) / 6.0 + log(pi) / 2
return log_factorial |
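Written out, the else-branch is Ramanujan's approximation to the log-factorial; this is a restatement of the expression in the code, not part of the original docstring:
\ln(n!) \approx n\ln n - n + \tfrac{1}{6}\ln\bigl(n\,(1 + 4n(1 + 2n))\bigr) + \tfrac{1}{2}\ln\pi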
def keyword(
name: str,
ns: Optional[str] = None,
kw_cache: atom.Atom["PMap[int, Keyword]"] = __INTERN,
) -> Keyword:
"""Create a new keyword."""
h = hash((name, ns))
return kw_cache.swap(__get_or_create, h, name, ns)[h] | Create a new keyword. | Below is the the instruction that describes the task:
### Input:
Create a new keyword.
### Response:
def keyword(
name: str,
ns: Optional[str] = None,
kw_cache: atom.Atom["PMap[int, Keyword]"] = __INTERN,
) -> Keyword:
"""Create a new keyword."""
h = hash((name, ns))
return kw_cache.swap(__get_or_create, h, name, ns)[h] |
def insert_all(db, schema_name, table_name, columns, items):
"""
    Insert all items in the given items list into the specified table, schema_name.table_name.
"""
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
columns_list = ', '.join(columns)
values_list = ', '.join(['?'] * len(columns))
query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
table=table, columns=columns_list, values=values_list)
for item in items:
values = [getattr(item, col) for col in columns]
        db.execute(query, values) | Insert all items in the given items list into the specified table, schema_name.table_name. | Below is the the instruction that describes the task:
### Input:
Insert all items in the given items list into the specified table, schema_name.table_name.
### Response:
def insert_all(db, schema_name, table_name, columns, items):
"""
    Insert all items in the given items list into the specified table, schema_name.table_name.
"""
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
columns_list = ', '.join(columns)
values_list = ', '.join(['?'] * len(columns))
query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
table=table, columns=columns_list, values=values_list)
for item in items:
values = [getattr(item, col) for col in columns]
db.execute(query, values) |
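A small illustration of the statement string the helper assembles before binding values; the schema, table and column names are invented for the example:
schema_name, table_name, columns = 'app', 'users', ['name', 'age']
table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name
query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(
    table=table, columns=', '.join(columns), values=', '.join(['?'] * len(columns)))
print(query)   # INSERT INTO app.users (name, age) VALUES (?, ?)
# each item then supplies [item.name, item.age] as the bound parameters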
def train(cls, new_data, old=None):
"""
Train a continuous scale
Parameters
----------
new_data : array_like
New values
old : array_like
Old range. Most likely a tuple of length 2.
Returns
-------
out : tuple
Limits(range) of the scale
"""
if not len(new_data):
return old
if not hasattr(new_data, 'dtype'):
new_data = np.asarray(new_data)
if new_data.dtype.kind not in CONTINUOUS_KINDS:
raise TypeError(
"Discrete value supplied to continuous scale")
if old is not None:
new_data = np.hstack([new_data, old])
return min_max(new_data, na_rm=True, finite=True) | Train a continuous scale
Parameters
----------
new_data : array_like
New values
old : array_like
Old range. Most likely a tuple of length 2.
Returns
-------
out : tuple
Limits(range) of the scale | Below is the the instruction that describes the task:
### Input:
Train a continuous scale
Parameters
----------
new_data : array_like
New values
old : array_like
Old range. Most likely a tuple of length 2.
Returns
-------
out : tuple
Limits(range) of the scale
### Response:
def train(cls, new_data, old=None):
"""
Train a continuous scale
Parameters
----------
new_data : array_like
New values
old : array_like
Old range. Most likely a tuple of length 2.
Returns
-------
out : tuple
Limits(range) of the scale
"""
if not len(new_data):
return old
if not hasattr(new_data, 'dtype'):
new_data = np.asarray(new_data)
if new_data.dtype.kind not in CONTINUOUS_KINDS:
raise TypeError(
"Discrete value supplied to continuous scale")
if old is not None:
new_data = np.hstack([new_data, old])
return min_max(new_data, na_rm=True, finite=True) |
def resample(self, seed=None):
"""Resample the dataset.
Args:
seed (int, optional): Seed for resampling. By default no seed is
used.
"""
if seed is not None:
gen = torch.manual_seed(seed)
else:
gen = torch.default_generator
if self.replacement:
self.perm = torch.LongTensor(len(self)).random_(
len(self.dataset), generator=gen)
else:
self.perm = torch.randperm(
len(self.dataset), generator=gen).narrow(0, 0, len(self)) | Resample the dataset.
Args:
seed (int, optional): Seed for resampling. By default no seed is
used. | Below is the the instruction that describes the task:
### Input:
Resample the dataset.
Args:
seed (int, optional): Seed for resampling. By default no seed is
used.
### Response:
def resample(self, seed=None):
"""Resample the dataset.
Args:
seed (int, optional): Seed for resampling. By default no seed is
used.
"""
if seed is not None:
gen = torch.manual_seed(seed)
else:
gen = torch.default_generator
if self.replacement:
self.perm = torch.LongTensor(len(self)).random_(
len(self.dataset), generator=gen)
else:
self.perm = torch.randperm(
len(self.dataset), generator=gen).narrow(0, 0, len(self)) |
def get_activities_by_query(self, activity_query=None):
"""Gets a list of Activities matching the given activity query.
arg: activityQuery (osid.learning.ActivityQuery): the
activity query
return: (osid.learning.ActivityList) - the returned ActivityList
raise: NullArgument - activityQuery is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - activityQuery is not of this service
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('activities',
bank_id=self._catalog_idstr)
query_terms = [v for k, v in activity_query._query_terms.items()]
url_path += '?' + '&'.join(query_terms)
        return objects.ActivityList(self._get_request(url_path)) | Gets a list of Activities matching the given activity query.
arg: activityQuery (osid.learning.ActivityQuery): the
activity query
return: (osid.learning.ActivityList) - the returned ActivityList
raise: NullArgument - activityQuery is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - activityQuery is not of this service
compliance: mandatory - This method must be implemented. | Below is the the instruction that describes the task:
### Input:
Gets a list of Activities matching the given activity query.
arg: activityQuery (osid.learning.ActivityQuery): the
activity query
return: (osid.learning.ActivityList) - the returned ActivityList
raise: NullArgument - activityQuery is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - activityQuery is not of this service
compliance: mandatory - This method must be implemented.
### Response:
def get_activities_by_query(self, activity_query=None):
"""Gets a list of Activities matching the given activity query.
arg: activityQuery (osid.learning.ActivityQuery): the
activity query
return: (osid.learning.ActivityList) - the returned ActivityList
raise: NullArgument - activityQuery is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - activityQuery is not of this service
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('activities',
bank_id=self._catalog_idstr)
query_terms = [v for k, v in activity_query._query_terms.items()]
url_path += '?' + '&'.join(query_terms)
        return objects.ActivityList(self._get_request(url_path))
def read_config(config):
"""Read config file and return uncomment line
"""
for line in config.splitlines():
line = line.lstrip()
if line and not line.startswith("#"):
return line
return "" | Read config file and return uncomment line | Below is the the instruction that describes the task:
### Input:
Read config file and return the first uncommented line
### Response:
def read_config(config):
"""Read config file and return uncomment line
"""
for line in config.splitlines():
line = line.lstrip()
if line and not line.startswith("#"):
return line
return "" |
def get_item_lookup_session(self):
"""Gets the ``OsidSession`` associated with the item lookup service.
return: (osid.assessment.ItemLookupSession) - an
``ItemLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_lookup()`` is ``true``.*
"""
if not self.supports_item_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemLookupSession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the item lookup service.
return: (osid.assessment.ItemLookupSession) - an
``ItemLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_lookup()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the item lookup service.
return: (osid.assessment.ItemLookupSession) - an
``ItemLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_lookup()`` is ``true``.*
### Response:
def get_item_lookup_session(self):
"""Gets the ``OsidSession`` associated with the item lookup service.
return: (osid.assessment.ItemLookupSession) - an
``ItemLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_lookup()`` is ``true``.*
"""
if not self.supports_item_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemLookupSession(runtime=self._runtime) |
def Region2_cp0(Tr, Pr):
"""Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
"""
Jo = [0, 1, -5, -4, -3, -2, -1, 2, 3]
no = [-0.96927686500217E+01, 0.10086655968018E+02, -0.56087911283020E-02,
0.71452738081455E-01, -0.40710498223928E+00, 0.14240819171444E+01,
-0.43839511319450E+01, -0.28408632460772E+00, 0.21268463753307E-01]
go = log(Pr)
gop = Pr**-1
gopp = -Pr**-2
got = gott = gopt = 0
for j, ni in zip(Jo, no):
go += ni * Tr**j
got += ni*j * Tr**(j-1)
gott += ni*j*(j-1) * Tr**(j-2)
return go, gop, gopp, got, gott, gopt | Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16 | Below is the instruction that describes the task:
### Input:
Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
### Response:
def Region2_cp0(Tr, Pr):
"""Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
"""
Jo = [0, 1, -5, -4, -3, -2, -1, 2, 3]
no = [-0.96927686500217E+01, 0.10086655968018E+02, -0.56087911283020E-02,
0.71452738081455E-01, -0.40710498223928E+00, 0.14240819171444E+01,
-0.43839511319450E+01, -0.28408632460772E+00, 0.21268463753307E-01]
go = log(Pr)
gop = Pr**-1
gopp = -Pr**-2
got = gott = gopt = 0
for j, ni in zip(Jo, no):
go += ni * Tr**j
got += ni*j * Tr**(j-1)
gott += ni*j*(j-1) * Tr**(j-2)
return go, gop, gopp, got, gott, gopt |
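For reference, the loop above accumulates the IF97 ideal-gas Gibbs energy and its first and second derivatives. Writing the code's Tr and Pr as the reduced variables \tau and \pi (the usual IF97 identification, assumed here), the returned tuple corresponds to
g^{o} = \ln\pi + \sum_{i=1}^{9} n_i\,\tau^{J_i}, \qquad g^{o}_{\pi} = \frac{1}{\pi}, \qquad g^{o}_{\pi\pi} = -\frac{1}{\pi^{2}}, \qquad g^{o}_{\tau} = \sum_{i=1}^{9} n_i J_i\,\tau^{J_i-1}, \qquad g^{o}_{\tau\tau} = \sum_{i=1}^{9} n_i J_i (J_i-1)\,\tau^{J_i-2}, \qquad g^{o}_{\pi\tau} = 0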
def scheduleServices(self, jobGraph):
"""
Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
"""
# Add jobGraph to set being processed by the service manager
self.jobGraphsWithServicesBeingStarted.add(jobGraph)
# Add number of jobs managed by ServiceManager
self.jobsIssuedToServiceManager += sum(map(len, jobGraph.services)) + 1 # The plus one accounts for the root job
# Asynchronously schedule the services
self._jobGraphsWithServicesToStart.put(jobGraph) | Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule. | Below is the instruction that describes the task:
### Input:
Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
### Response:
def scheduleServices(self, jobGraph):
"""
Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
"""
# Add jobGraph to set being processed by the service manager
self.jobGraphsWithServicesBeingStarted.add(jobGraph)
# Add number of jobs managed by ServiceManager
self.jobsIssuedToServiceManager += sum(map(len, jobGraph.services)) + 1 # The plus one accounts for the root job
# Asynchronously schedule the services
self._jobGraphsWithServicesToStart.put(jobGraph) |
def get_search_score(query, choice, ignore_case=True, apply_regex=True,
template='{}'):
"""Returns a tuple with the enriched text (if a template is provided) and
a score for the match.
Parameters
----------
query : str
String with letters to search in choice (in order of appearance).
choice : str
Sentence/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value to perform a case insensitive search (True by default).
apply_regex : bool, optional
Optional value (True by default) to perform a regex search. Useful
when this function is called directly.
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : tuple
Tuples where the first item is the text (enriched if a template was
used) and the second item is a search score.
Notes
-----
The score is given according to the following precedence (high to low):
- Letters in one word and no spaces with exact match.
Example: 'up' in 'up stroke'
- Letters in one word and no spaces with partial match.
Example: 'up' in 'upstream stroke'
- Letters in one word but with skip letters.
Example: 'cls' in 'close up'
- Letters in two or more words
Example: 'cls' in 'car lost'
"""
original_choice = choice
result = (original_choice, NOT_FOUND_SCORE)
# Handle empty string case
if not query:
return result
if ignore_case:
query = query.lower()
choice = choice.lower()
if apply_regex:
pattern = get_search_regex(query, ignore_case=ignore_case)
r = re.search(pattern, choice)
if r is None:
return result
else:
sep = u'-' # Matches will be replaced by this character
let = u'x' # Nonmatches (except spaces) will be replaced by this
score = 0
exact_words = [query == word for word in choice.split(u' ')]
partial_words = [query in word for word in choice.split(u' ')]
if any(exact_words) or any(partial_words):
pos_start = choice.find(query)
pos_end = pos_start + len(query)
score += pos_start
text = choice.replace(query, sep*len(query), 1)
enriched_text = original_choice[:pos_start] +\
template.format(original_choice[pos_start:pos_end]) +\
original_choice[pos_end:]
if any(exact_words):
# Check if the query words exists in a word with exact match
score += 1
elif any(partial_words):
# Check if the query words exists in a word with partial match
score += 100
else:
# Check letter by letter
text = [l for l in original_choice]
if ignore_case:
temp_text = [l.lower() for l in original_choice]
else:
temp_text = text[:]
# Give points to start of string
score += temp_text.index(query[0])
# Find the query letters and replace them by `sep`, also apply
# template as needed for enriching the letters in the text
enriched_text = text[:]
for char in query:
if char != u'' and char in temp_text:
index = temp_text.index(char)
enriched_text[index] = template.format(text[index])
text[index] = sep
temp_text = [u' ']*(index + 1) + temp_text[index+1:]
enriched_text = u''.join(enriched_text)
patterns_text = []
for i, char in enumerate(text):
if char != u' ' and char != sep:
new_char = let
else:
new_char = char
patterns_text.append(new_char)
patterns_text = u''.join(patterns_text)
for i in reversed(range(1, len(query) + 1)):
score += (len(query) - patterns_text.count(sep*i))*100000
temp = patterns_text.split(sep)
while u'' in temp:
temp.remove(u'')
if not patterns_text.startswith(sep):
temp = temp[1:]
if not patterns_text.endswith(sep):
temp = temp[:-1]
for pat in temp:
score += pat.count(u' ')*10000
score += pat.count(let)*100
return original_choice, enriched_text, score | Returns a tuple with the enriched text (if a template is provided) and
a score for the match.
Parameters
----------
query : str
String with letters to search in choice (in order of appearance).
choice : str
Sentence/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value to perform a case insensitive search (True by default).
apply_regex : bool, optional
Optional value (True by default) to perform a regex search. Useful
when this function is called directly.
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : tuple
Tuples where the first item is the text (enriched if a template was
used) and the second item is a search score.
Notes
-----
The score is given according to the following precedence (high to low):
- Letters in one word and no spaces with exact match.
Example: 'up' in 'up stroke'
- Letters in one word and no spaces with partial match.
Example: 'up' in 'upstream stroke'
- Letters in one word but with skip letters.
Example: 'cls' in 'close up'
- Letters in two or more words
Example: 'cls' in 'car lost' | Below is the instruction that describes the task:
### Input:
Returns a tuple with the enriched text (if a template is provided) and
a score for the match.
Parameters
----------
query : str
String with letters to search in choice (in order of appearance).
choice : str
Sentence/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value to perform a case insensitive search (True by default).
apply_regex : bool, optional
Optional value (True by default) to perform a regex search. Useful
when this function is called directly.
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : tuple
Tuples where the first item is the text (enriched if a template was
used) and the second item is a search score.
Notes
-----
The score is given according to the following precedence (high to low):
- Letters in one word and no spaces with exact match.
Example: 'up' in 'up stroke'
- Letters in one word and no spaces with partial match.
Example: 'up' in 'upstream stroke'
- Letters in one word but with skip letters.
Example: 'cls' in 'close up'
- Letters in two or more words
Example: 'cls' in 'car lost'
### Response:
def get_search_score(query, choice, ignore_case=True, apply_regex=True,
template='{}'):
"""Returns a tuple with the enriched text (if a template is provided) and
a score for the match.
Parameters
----------
query : str
String with letters to search in choice (in order of appearance).
choice : str
Sentence/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value to perform a case insensitive search (True by default).
apply_regex : bool, optional
Optional value (True by default) to perform a regex search. Useful
when this function is called directly.
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : tuple
Tuples where the first item is the text (enriched if a template was
used) and the second item is a search score.
Notes
-----
The score is given according to the following precedence (high to low):
- Letters in one word and no spaces with exact match.
Example: 'up' in 'up stroke'
- Letters in one word and no spaces with partial match.
Example: 'up' in 'upstream stroke'
- Letters in one word but with skip letters.
Example: 'cls' in 'close up'
- Letters in two or more words
Example: 'cls' in 'car lost'
"""
original_choice = choice
result = (original_choice, NOT_FOUND_SCORE)
# Handle empty string case
if not query:
return result
if ignore_case:
query = query.lower()
choice = choice.lower()
if apply_regex:
pattern = get_search_regex(query, ignore_case=ignore_case)
r = re.search(pattern, choice)
if r is None:
return result
else:
sep = u'-' # Matches will be replaced by this character
let = u'x' # Nonmatches (except spaces) will be replaced by this
score = 0
exact_words = [query == word for word in choice.split(u' ')]
partial_words = [query in word for word in choice.split(u' ')]
if any(exact_words) or any(partial_words):
pos_start = choice.find(query)
pos_end = pos_start + len(query)
score += pos_start
text = choice.replace(query, sep*len(query), 1)
enriched_text = original_choice[:pos_start] +\
template.format(original_choice[pos_start:pos_end]) +\
original_choice[pos_end:]
if any(exact_words):
# Check if the query words exists in a word with exact match
score += 1
elif any(partial_words):
# Check if the query words exists in a word with partial match
score += 100
else:
# Check letter by letter
text = [l for l in original_choice]
if ignore_case:
temp_text = [l.lower() for l in original_choice]
else:
temp_text = text[:]
# Give points to start of string
score += temp_text.index(query[0])
# Find the query letters and replace them by `sep`, also apply
# template as needed for enriching the letters in the text
enriched_text = text[:]
for char in query:
if char != u'' and char in temp_text:
index = temp_text.index(char)
enriched_text[index] = template.format(text[index])
text[index] = sep
temp_text = [u' ']*(index + 1) + temp_text[index+1:]
enriched_text = u''.join(enriched_text)
patterns_text = []
for i, char in enumerate(text):
if char != u' ' and char != sep:
new_char = let
else:
new_char = char
patterns_text.append(new_char)
patterns_text = u''.join(patterns_text)
for i in reversed(range(1, len(query) + 1)):
score += (len(query) - patterns_text.count(sep*i))*100000
temp = patterns_text.split(sep)
while u'' in temp:
temp.remove(u'')
if not patterns_text.startswith(sep):
temp = temp[1:]
if not patterns_text.endswith(sep):
temp = temp[:-1]
for pat in temp:
score += pat.count(u' ')*10000
score += pat.count(let)*100
return original_choice, enriched_text, score |
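A minimal usage sketch of the precedence described above, assuming get_search_score and its module-level helpers (get_search_regex, NOT_FOUND_SCORE) are importable from the same module; the comments state the expected relative ordering rather than exact numbers:
# Hypothetical call sites -- lower scores indicate better matches in this scheme.
for choice in ('close up', 'car lost'):
    original, enriched, score = get_search_score('cls', choice, template='<b>{}</b>')
    print(original, enriched, score)
# Per the docstring's precedence, 'cls' found inside the single word 'close'
# should score lower (better) than 'cls' scattered across the words 'car lost'.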
def scale_edges(self, multiplier):
'''Multiply all edges in this ``Tree`` by ``multiplier``'''
if not isinstance(multiplier,int) and not isinstance(multiplier,float):
raise TypeError("multiplier must be an int or float")
for node in self.traverse_preorder():
if node.edge_length is not None:
node.edge_length *= multiplier | Multiply all edges in this ``Tree`` by ``multiplier`` | Below is the instruction that describes the task:
### Input:
Multiply all edges in this ``Tree`` by ``multiplier``
### Response:
def scale_edges(self, multiplier):
'''Multiply all edges in this ``Tree`` by ``multiplier``'''
if not isinstance(multiplier,int) and not isinstance(multiplier,float):
raise TypeError("multiplier must be an int or float")
for node in self.traverse_preorder():
if node.edge_length is not None:
node.edge_length *= multiplier |
def purity(rho: Density) -> bk.BKTensor:
"""
Calculate the purity of a mixed quantum state.
Purity, defined as tr(rho^2), has an upper bound of 1 for a pure state,
and a lower bound of 1/D (where D is the Hilbert space dimension) for a
competently mixed state.
Two closely related measures are the linear entropy, 1- purity, and the
participation ratio, 1/purity.
"""
tensor = rho.tensor
N = rho.qubit_nb
matrix = bk.reshape(tensor, [2**N, 2**N])
return bk.trace(bk.matmul(matrix, matrix)) | Calculate the purity of a mixed quantum state.
Purity, defined as tr(rho^2), has an upper bound of 1 for a pure state,
and a lower bound of 1/D (where D is the Hilbert space dimension) for a
completely mixed state.
Two closely related measures are the linear entropy, 1- purity, and the
participation ratio, 1/purity. | Below is the instruction that describes the task:
### Input:
Calculate the purity of a mixed quantum state.
Purity, defined as tr(rho^2), has an upper bound of 1 for a pure state,
and a lower bound of 1/D (where D is the Hilbert space dimension) for a
completely mixed state.
Two closely related measures are the linear entropy, 1- purity, and the
participation ratio, 1/purity.
### Response:
def purity(rho: Density) -> bk.BKTensor:
"""
Calculate the purity of a mixed quantum state.
Purity, defined as tr(rho^2), has an upper bound of 1 for a pure state,
and a lower bound of 1/D (where D is the Hilbert space dimension) for a
completely mixed state.
Two closely related measures are the linear entropy, 1- purity, and the
participation ratio, 1/purity.
"""
tensor = rho.tensor
N = rho.qubit_nb
matrix = bk.reshape(tensor, [2**N, 2**N])
return bk.trace(bk.matmul(matrix, matrix)) |
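As a quick check of the bounds stated above, the completely (maximally) mixed state on N qubits, with D = 2^N, attains the lower bound:
\rho = \frac{I_D}{D} \;\Rightarrow\; \operatorname{tr}(\rho^{2}) = D \cdot \frac{1}{D^{2}} = \frac{1}{D}, \qquad \text{while a pure state } \rho = |\psi\rangle\langle\psi| \text{ gives } \operatorname{tr}(\rho^{2}) = \operatorname{tr}(\rho) = 1.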
def set_attributes(self, obj, **attributes):
""" Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set
"""
attributes_url = '{}/{}/attributes'.format(self.session_url, obj.ref)
attributes_list = [{u'name': str(name), u'value': str(value)} for name, value in attributes.items()]
self._request(RestMethod.patch, attributes_url, headers={'Content-Type': 'application/json'},
data=json.dumps(attributes_list)) | Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set | Below is the instruction that describes the task:
### Input:
Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set
### Response:
def set_attributes(self, obj, **attributes):
""" Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set
"""
attributes_url = '{}/{}/attributes'.format(self.session_url, obj.ref)
attributes_list = [{u'name': str(name), u'value': str(value)} for name, value in attributes.items()]
self._request(RestMethod.patch, attributes_url, headers={'Content-Type': 'application/json'},
data=json.dumps(attributes_list)) |
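For illustration, a hypothetical call such as set_attributes(port, mode='config', name='port1') would assemble and PATCH the JSON body below; the attribute names and values are invented:
import json

attributes = {'mode': 'config', 'name': 'port1'}  # invented attribute names/values
attributes_list = [{u'name': str(name), u'value': str(value)} for name, value in attributes.items()]
print(json.dumps(attributes_list))
# [{"name": "mode", "value": "config"}, {"name": "name", "value": "port1"}]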
def from_dict(cls, data):
"""
:type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object
"""
return cls(
auth_class_ref=data.get("auth_class_ref"),
timestamp=data.get("timestamp"),
issuer=data.get("issuer"),
) | :type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object | Below is the instruction that describes the task:
### Input:
:type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object
### Response:
def from_dict(cls, data):
"""
:type data: dict[str, str]
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object
"""
return cls(
auth_class_ref=data.get("auth_class_ref"),
timestamp=data.get("timestamp"),
issuer=data.get("issuer"),
) |
def save(self):
""" Saves current patches list in the series file """
with open(self.series_file, "wb") as f:
for patchline in self.patchlines:
f.write(_encode_str(str(patchline)))
f.write(b"\n") | Saves current patches list in the series file | Below is the the instruction that describes the task:
### Input:
Saves current patches list in the series file
### Response:
def save(self):
""" Saves current patches list in the series file """
with open(self.series_file, "wb") as f:
for patchline in self.patchlines:
f.write(_encode_str(str(patchline)))
f.write(b"\n") |
def delete_entitlement(owner, repo, identifier):
"""Delete an entitlement from a repository."""
client = get_entitlements_api()
with catch_raise_api_exception():
_, _, headers = client.entitlements_delete_with_http_info(
owner=owner, repo=repo, identifier=identifier
)
ratelimits.maybe_rate_limit(client, headers) | Delete an entitlement from a repository. | Below is the instruction that describes the task:
### Input:
Delete an entitlement from a repository.
### Response:
def delete_entitlement(owner, repo, identifier):
"""Delete an entitlement from a repository."""
client = get_entitlements_api()
with catch_raise_api_exception():
_, _, headers = client.entitlements_delete_with_http_info(
owner=owner, repo=repo, identifier=identifier
)
ratelimits.maybe_rate_limit(client, headers) |
def _conv_general_permutations(self, dimension_numbers):
"""Utility for convolution dimension permutations relative to Conv HLO."""
lhs_spec, rhs_spec, out_spec = dimension_numbers
lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C')
charpairs = (lhs_char, rhs_char, out_char)
for i, (a, b) in enumerate(charpairs):
if not (dimension_numbers[i].count(a) == 1 and
dimension_numbers[i].count(b) == 1):
msg = ('convolution dimension_numbers[{}] must contain the characters '
'"{}" and "{}" exatly once, got {}.')
raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
msg = ('convolution dimension_numbers[{}] cannot have duplicate '
'characters, got {}.')
raise TypeError(msg.format(i, dimension_numbers[i]))
if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
set(out_spec) - set(out_char)):
msg = ('convolution dimension_numbers elements must each have the same '
'set of spatial characters, got {}.')
raise TypeError(msg.format(dimension_numbers))
def getperm(spec, charpair):
spatial = (i for i, c in enumerate(spec) if c not in charpair)
if spec is not rhs_spec:
spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
return lhs_perm, rhs_perm, out_perm | Utility for convolution dimension permutations relative to Conv HLO. | Below is the instruction that describes the task:
### Input:
Utility for convolution dimension permutations relative to Conv HLO.
### Response:
def _conv_general_permutations(self, dimension_numbers):
"""Utility for convolution dimension permutations relative to Conv HLO."""
lhs_spec, rhs_spec, out_spec = dimension_numbers
lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C')
charpairs = (lhs_char, rhs_char, out_char)
for i, (a, b) in enumerate(charpairs):
if not (dimension_numbers[i].count(a) == 1 and
dimension_numbers[i].count(b) == 1):
msg = ('convolution dimension_numbers[{}] must contain the characters '
'"{}" and "{}" exatly once, got {}.')
raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
msg = ('convolution dimension_numbers[{}] cannot have duplicate '
'characters, got {}.')
raise TypeError(msg.format(i, dimension_numbers[i]))
if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
set(out_spec) - set(out_char)):
msg = ('convolution dimension_numbers elements must each have the same '
'set of spatial characters, got {}.')
raise TypeError(msg.format(dimension_numbers))
def getperm(spec, charpair):
spatial = (i for i, c in enumerate(spec) if c not in charpair)
if spec is not rhs_spec:
spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
return lhs_perm, rhs_perm, out_perm |
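A standalone sketch of the permutation rule for the common ('NHWC', 'HWIO', 'NHWC') dimension_numbers; it re-implements only the inner getperm helper outside the class to show the tuples one would expect:
def getperm(spec, charpair, rhs_spec):
    # Batch/feature (or output/input) dims first, then the spatial dims,
    # ordered to follow their position in the kernel (rhs) spec.
    spatial = (i for i, c in enumerate(spec) if c not in charpair)
    if spec is not rhs_spec:
        spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
    return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)

lhs_spec, rhs_spec, out_spec = 'NHWC', 'HWIO', 'NHWC'
print(getperm(lhs_spec, ('N', 'C'), rhs_spec))  # (0, 3, 1, 2)
print(getperm(rhs_spec, ('O', 'I'), rhs_spec))  # (3, 2, 0, 1)
print(getperm(out_spec, ('N', 'C'), rhs_spec))  # (0, 3, 1, 2)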
def get_namespaced_custom_object_status(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
"""get_namespaced_custom_object_status # noqa: E501
read status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object_status(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
else:
(data) = self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
return data | get_namespaced_custom_object_status # noqa: E501
read status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object_status(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
get_namespaced_custom_object_status # noqa: E501
read status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object_status(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread.
### Response:
def get_namespaced_custom_object_status(self, group, version, namespace, plural, name, **kwargs): # noqa: E501
"""get_namespaced_custom_object_status # noqa: E501
read status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object_status(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
else:
(data) = self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs) # noqa: E501
return data |
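A hedged usage sketch against the official Kubernetes Python client, which exposes a method of the same name on CustomObjectsApi; the group, plural, and resource name below are illustrative only:
from kubernetes import client, config

config.load_kube_config()              # or load_incluster_config() inside a pod
api = client.CustomObjectsApi()
status = api.get_namespaced_custom_object_status(
    group='cert-manager.io', version='v1', namespace='default',
    plural='certificates', name='my-cert')   # hypothetical custom resource
print(status.get('status', {}))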
def load(self, file, **options):
""" Loads the field *value* for each :class:`Field` *nested* in the
`Container` from an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` *file*.
:keyword str section: section in the ``.ini`` *file* to lookup the
value for each :class:`Field` in the `Container`.
If no *section* is specified the class name of the instance is used.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` load their referenced :attr:`~Pointer.data` object
field values as well (chained method call).
:keyword bool verbose: if ``True`` the loading is executed in verbose
mode.
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.load('foo.ini')
[Foo]
Foo.stream =
Foo.float = 0.0
Foo.structure.decimal = 0
Foo.array[0] = 0x0
Foo.array[1] = 0x0
Foo.array[2] = 0x0
Foo.pointer = 0x0
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}'
"""
section = options.pop('section', self.__class__.__name__)
parser = ConfigParser()
parser.read(file)
if parser.has_section(section):
verbose(options, "[{0}]".format(section))
for field_path, field in self.field_items(**options):
if field_path.startswith('['):
# Sequence element
option = '_' + field_path
else:
option = field_path
if parser.has_option(section, option):
# Bool fields
if field.is_bool():
field.value = parser.getboolean(section, option)
# Float fields
elif field.is_float():
field.value = parser.getfloat(section, option)
# String fields
elif field.is_string():
field.value = parser.get(section, option)
# Stream fields
elif field.is_stream():
value = parser.get(section, option)
stream = bytes.fromhex(value.replace("'", ""))
# Auto size a zero sized stream field to the current length
if not field:
field.resize(len(stream))
field.value = stream
# Decimal fields
else:
field.value = parser.get(section, option)
if field_path.startswith('['):
verbose(options,
"{0}{1} = {2}".format(section,
field_path,
field.value))
else:
verbose(options,
"{0}.{1} = {2}".format(section,
field_path,
field.value))
else:
verbose(options, "No section [{0}] found.".format(section)) | Loads the field *value* for each :class:`Field` *nested* in the
`Container` from an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` *file*.
:keyword str section: section in the ``.ini`` *file* to lookup the
value for each :class:`Field` in the `Container`.
If no *section* is specified the class name of the instance is used.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` load their referenced :attr:`~Pointer.data` object
field values as well (chained method call).
:keyword bool verbose: if ``True`` the loading is executed in verbose
mode.
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.load('foo.ini')
[Foo]
Foo.stream =
Foo.float = 0.0
Foo.structure.decimal = 0
Foo.array[0] = 0x0
Foo.array[1] = 0x0
Foo.array[2] = 0x0
Foo.pointer = 0x0
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}' | Below is the the instruction that describes the task:
### Input:
Loads the field *value* for each :class:`Field` *nested* in the
`Container` from an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` *file*.
:keyword str section: section in the ``.ini`` *file* to lookup the
value for each :class:`Field` in the `Container`.
If no *section* is specified the class name of the instance is used.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` load their referenced :attr:`~Pointer.data` object
field values as well (chained method call).
:keyword bool verbose: if ``True`` the loading is executed in verbose
mode.
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.load('foo.ini')
[Foo]
Foo.stream =
Foo.float = 0.0
Foo.structure.decimal = 0
Foo.array[0] = 0x0
Foo.array[1] = 0x0
Foo.array[2] = 0x0
Foo.pointer = 0x0
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}'
### Response:
def load(self, file, **options):
""" Loads the field *value* for each :class:`Field` *nested* in the
`Container` from an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` *file*.
:keyword str section: section in the ``.ini`` *file* to lookup the
value for each :class:`Field` in the `Container`.
If no *section* is specified the class name of the instance is used.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` load their referenced :attr:`~Pointer.data` object
field values as well (chained method call).
:keyword bool verbose: if ``True`` the loading is executed in verbose
mode.
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.load('foo.ini')
[Foo]
Foo.stream =
Foo.float = 0.0
Foo.structure.decimal = 0
Foo.array[0] = 0x0
Foo.array[1] = 0x0
Foo.array[2] = 0x0
Foo.pointer = 0x0
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}'
"""
section = options.pop('section', self.__class__.__name__)
parser = ConfigParser()
parser.read(file)
if parser.has_section(section):
verbose(options, "[{0}]".format(section))
for field_path, field in self.field_items(**options):
if field_path.startswith('['):
# Sequence element
option = '_' + field_path
else:
option = field_path
if parser.has_option(section, option):
# Bool fields
if field.is_bool():
field.value = parser.getboolean(section, option)
# Float fields
elif field.is_float():
field.value = parser.getfloat(section, option)
# String fields
elif field.is_string():
field.value = parser.get(section, option)
# Stream fields
elif field.is_stream():
value = parser.get(section, option)
stream = bytes.fromhex(value.replace("'", ""))
# Auto size a zero sized stream field to the current length
if not field:
field.resize(len(stream))
field.value = stream
# Decimal fields
else:
field.value = parser.get(section, option)
if field_path.startswith('['):
verbose(options,
"{0}{1} = {2}".format(section,
field_path,
field.value))
else:
verbose(options,
"{0}.{1} = {2}".format(section,
field_path,
field.value))
else:
verbose(options, "No section [{0}] found.".format(section)) |
def parse(self, configManager, config):
"""
Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
parser = ConfigParser.RawConfigParser()
configOptions = dict()
configFile = self._getConfigFile(config)
if configFile:
parser.readfp(configFile)
for section in parser.sections():
if self.sections is None or section in self.sections:
configOptions.update(parser.items(section))
return configOptions | Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object. | Below is the instruction that describes the task:
### Input:
Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
### Response:
def parse(self, configManager, config):
"""
Parse configuration options out of an .ini configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
parser = ConfigParser.RawConfigParser()
configOptions = dict()
configFile = self._getConfigFile(config)
if configFile:
parser.readfp(configFile)
for section in parser.sections():
if self.sections is None or section in self.sections:
configOptions.update(parser.items(section))
return configOptions |
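To make the flattening concrete, a self-contained sketch (Python 3 configparser rather than the Python 2 ConfigParser used above) of how options from the selected sections land in one flat dict; the section and option names are made up:
import configparser, io

raw = io.StringIO("[server]\nhost = 0.0.0.0\nport = 8080\n\n[logging]\nlevel = INFO\n")
parser = configparser.RawConfigParser()
parser.read_file(raw)

sections = ['server']                  # None would mean "accept every section"
configOptions = {}
for section in parser.sections():
    if sections is None or section in sections:
        configOptions.update(parser.items(section))
print(configOptions)                   # {'host': '0.0.0.0', 'port': '8080'}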
def _get_loftee(data):
"""Retrieve loss of function plugin parameters for LOFTEE.
https://github.com/konradjk/loftee
"""
ancestral_file = tz.get_in(("genome_resources", "variation", "ancestral"), data)
if not ancestral_file or not os.path.exists(ancestral_file):
ancestral_file = "false"
vep = config_utils.get_program("vep", data["config"])
args = ["--plugin", "LoF,human_ancestor_fa:%s,loftee_path:%s" %
(ancestral_file, os.path.dirname(os.path.realpath(vep)))]
return args | Retrieve loss of function plugin parameters for LOFTEE.
https://github.com/konradjk/loftee | Below is the instruction that describes the task:
### Input:
Retrieve loss of function plugin parameters for LOFTEE.
https://github.com/konradjk/loftee
### Response:
def _get_loftee(data):
"""Retrieve loss of function plugin parameters for LOFTEE.
https://github.com/konradjk/loftee
"""
ancestral_file = tz.get_in(("genome_resources", "variation", "ancestral"), data)
if not ancestral_file or not os.path.exists(ancestral_file):
ancestral_file = "false"
vep = config_utils.get_program("vep", data["config"])
args = ["--plugin", "LoF,human_ancestor_fa:%s,loftee_path:%s" %
(ancestral_file, os.path.dirname(os.path.realpath(vep)))]
return args |
def _is_not_pickle_safe_gl_class(obj_class):
"""
Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model.
"""
gl_ds = [_SFrame, _SArray, _SGraph]
# Object is GLC-DS or GLC-Model
return (obj_class in gl_ds) or _is_not_pickle_safe_gl_model_class(obj_class) | Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model. | Below is the instruction that describes the task:
### Input:
Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model.
### Response:
def _is_not_pickle_safe_gl_class(obj_class):
"""
Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model.
"""
gl_ds = [_SFrame, _SArray, _SGraph]
# Object is GLC-DS or GLC-Model
return (obj_class in gl_ds) or _is_not_pickle_safe_gl_model_class(obj_class) |
def joint_distances(self):
'''Get the current joint separations for the skeleton.
Returns
-------
distances : list of float
A list expressing the distance between the two joint anchor points,
for each joint in the skeleton. These quantities describe how
"exploded" the bodies in the skeleton are; a value of 0 indicates
that the constraints are perfectly satisfied for that joint.
'''
return [((np.array(j.anchor) - j.anchor2) ** 2).sum() for j in self.joints] | Get the current joint separations for the skeleton.
Returns
-------
distances : list of float
A list expressing the distance between the two joint anchor points,
for each joint in the skeleton. These quantities describe how
"exploded" the bodies in the skeleton are; a value of 0 indicates
that the constraints are perfectly satisfied for that joint. | Below is the instruction that describes the task:
### Input:
Get the current joint separations for the skeleton.
Returns
-------
distances : list of float
A list expressing the distance between the two joint anchor points,
for each joint in the skeleton. These quantities describe how
"exploded" the bodies in the skeleton are; a value of 0 indicates
that the constraints are perfectly satisfied for that joint.
### Response:
def joint_distances(self):
'''Get the current joint separations for the skeleton.
Returns
-------
distances : list of float
A list expressing the distance between the two joint anchor points,
for each joint in the skeleton. These quantities describe how
"exploded" the bodies in the skeleton are; a value of 0 indicates
that the constraints are perfectly satisfied for that joint.
'''
return [((np.array(j.anchor) - j.anchor2) ** 2).sum() for j in self.joints] |
def find_uuid(es_url, index):
""" Find the unique identifier field for a given index """
uid_field = None
# Get the first item to detect the data source and raw/enriched type
res = requests.get('%s/%s/_search?size=1' % (es_url, index))
first_item = res.json()['hits']['hits'][0]['_source']
fields = first_item.keys()
if 'uuid' in fields:
uid_field = 'uuid'
else:
# Non perceval backend
uuid_value = res.json()['hits']['hits'][0]['_id']
logging.debug("Finding unique id for %s with value %s", index, uuid_value)
for field in fields:
if first_item[field] == uuid_value:
logging.debug("Found unique id for %s: %s", index, field)
uid_field = field
break
if not uid_field:
logging.error("Can not find uid field for %s. Can not copy the index.", index)
logging.error("Try to copy it directly with elasticdump or similar.")
sys.exit(1)
return uid_field | Find the unique identifier field for a given index | Below is the instruction that describes the task:
### Input:
Find the unique identifier field for a given index
### Response:
def find_uuid(es_url, index):
""" Find the unique identifier field for a given index """
uid_field = None
# Get the first item to detect the data source and raw/enriched type
res = requests.get('%s/%s/_search?size=1' % (es_url, index))
first_item = res.json()['hits']['hits'][0]['_source']
fields = first_item.keys()
if 'uuid' in fields:
uid_field = 'uuid'
else:
# Non perceval backend
uuid_value = res.json()['hits']['hits'][0]['_id']
logging.debug("Finding unique id for %s with value %s", index, uuid_value)
for field in fields:
if first_item[field] == uuid_value:
logging.debug("Found unique id for %s: %s", index, field)
uid_field = field
break
if not uid_field:
logging.error("Can not find uid field for %s. Can not copy the index.", index)
logging.error("Try to copy it directly with elasticdump or similar.")
sys.exit(1)
return uid_field |
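The fallback branch above matches the document _id against the _source field values; a tiny illustration of that matching on a fabricated Elasticsearch hit (field names and values are invented):
hit = {'_id': 'c0ffee', '_source': {'id': 'c0ffee', 'summary': 'Fix crash'}}
uuid_value = hit['_id']
uid_field = next((field for field, value in hit['_source'].items() if value == uuid_value), None)
print(uid_field)   # 'id'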
def p_class_constant_declaration(p):
'''class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
| CONST STRING EQUALS static_scalar'''
if len(p) == 6:
p[0] = p[1] + [ast.ClassConstant(p[3], p[5], lineno=p.lineno(2))]
else:
p[0] = [ast.ClassConstant(p[2], p[4], lineno=p.lineno(1))] | class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
| CONST STRING EQUALS static_scalar | Below is the instruction that describes the task:
### Input:
class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
| CONST STRING EQUALS static_scalar
### Response:
def p_class_constant_declaration(p):
'''class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
| CONST STRING EQUALS static_scalar'''
if len(p) == 6:
p[0] = p[1] + [ast.ClassConstant(p[3], p[5], lineno=p.lineno(2))]
else:
p[0] = [ast.ClassConstant(p[2], p[4], lineno=p.lineno(1))] |
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
"""Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry.
"""
# Make sure that a source is given, and is valid (not erroneous)
if cat_dict_class != Error:
try:
source = self._check_cat_dict_source(cat_dict_class,
key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return False
if source is None:
return False
# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
# Compare this new entry with all previous entries to make sure it is new
if compare_to_existing and cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
# Return the entry in case we want to use any additional
# tags to augment the old entry
return new_entry
# If this is an alias, add it to the parent catalog's reverse
# dictionary linking aliases to names for fast lookup.
if key_in_self == self._KEYS.ALIAS:
# Check if adding this alias makes us a dupe, if so mark
# ourselves as a dupe.
if (check_for_dupes and 'aliases' in dir(self.catalog) and
new_entry[QUANTITY.VALUE] in self.catalog.aliases):
possible_dupe = self.catalog.aliases[new_entry[QUANTITY.VALUE]]
# print(possible_dupe)
if (possible_dupe != self[self._KEYS.NAME] and
possible_dupe in self.catalog.entries):
self.dupe_of.append(possible_dupe)
if 'aliases' in dir(self.catalog):
self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[
self._KEYS.NAME]
self.setdefault(key_in_self, []).append(new_entry)
if (key_in_self == self._KEYS.ALIAS and check_for_dupes and
self.dupe_of):
self.merge_dupes()
return True | Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry. | Below is the instruction that describes the task:
### Input:
Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry.
### Response:
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
"""Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry.
"""
# Make sure that a source is given, and is valid (not erroneous)
if cat_dict_class != Error:
try:
source = self._check_cat_dict_source(cat_dict_class,
key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return False
if source is None:
return False
# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
# Compare this new entry with all previous entries to make sure it is new
if compare_to_existing and cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
# Return the entry in case we want to use any additional
# tags to augment the old entry
return new_entry
# If this is an alias, add it to the parent catalog's reverse
# dictionary linking aliases to names for fast lookup.
if key_in_self == self._KEYS.ALIAS:
# Check if adding this alias makes us a dupe, if so mark
# ourselves as a dupe.
if (check_for_dupes and 'aliases' in dir(self.catalog) and
new_entry[QUANTITY.VALUE] in self.catalog.aliases):
possible_dupe = self.catalog.aliases[new_entry[QUANTITY.VALUE]]
# print(possible_dupe)
if (possible_dupe != self[self._KEYS.NAME] and
possible_dupe in self.catalog.entries):
self.dupe_of.append(possible_dupe)
if 'aliases' in dir(self.catalog):
self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[
self._KEYS.NAME]
self.setdefault(key_in_self, []).append(new_entry)
if (key_in_self == self._KEYS.ALIAS and check_for_dupes and
self.dupe_of):
self.merge_dupes()
return True |
def _is_empty_observation_data(
feature_ndims, observation_index_points, observations):
"""Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
"""
# If both input locations and observations are `None`, we consider this
# "empty" observation data.
if observation_index_points is None and observations is None:
return True
num_obs = tf.compat.dimension_value(
observation_index_points.shape[-(feature_ndims + 1)])
if num_obs is not None and num_obs == 0:
return True
return False | Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty. | Below is the instruction that describes the task:
### Input:
Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
### Response:
def _is_empty_observation_data(
feature_ndims, observation_index_points, observations):
"""Returns `True` if given observation data is empty.
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty.
"""
# If both input locations and observations are `None`, we consider this
# "empty" observation data.
if observation_index_points is None and observations is None:
return True
num_obs = tf.compat.dimension_value(
observation_index_points.shape[-(feature_ndims + 1)])
if num_obs is not None and num_obs == 0:
return True
return False |
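A small sketch of the shape bookkeeping: with feature_ndims = 1 the "number of observations" axis sits just left of the single feature axis, so a [0, 3]-shaped index-point tensor counts as empty (the shapes here are illustrative):
import tensorflow as tf

feature_ndims = 1
index_points = tf.zeros([0, 3])        # zero observations, one feature dim of size 3
num_obs = tf.compat.dimension_value(index_points.shape[-(feature_ndims + 1)])
print(num_obs == 0)                    # True -> treated as empty observation data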
def parse_minionqc_report(self, s_name, f):
'''
Parses minionqc's 'summary.yaml' report file for results.
Uses only the "All reads" stats. Ignores "Q>=x" part.
'''
try:
# Parsing as OrderedDict is slightly messier with YAML
# http://stackoverflow.com/a/21048064/713980
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, dict_constructor)
summary_dict = yaml.safe_load(f)
except Exception as e:
log.error("Error parsing MinIONQC input file: {}".format(f))
return
# Do a deep copy as dicts are mutable
self.minionqc_raw_data[s_name] = copy.deepcopy(summary_dict)
# get q value threshold used for reads
q_threshold = None
for k in summary_dict.keys():
if k.startswith('Q>='):
q_threshold = k
data_dict = {}
data_dict['all'] = summary_dict['All reads'] # all reads
data_dict['q_filt'] = summary_dict[q_threshold] # quality filtered reads
for q_key in ['all', 'q_filt']:
for key_1 in ['reads', 'gigabases']:
for key_2 in data_dict[q_key][key_1]:
new_key = '{} {}'.format(key_1, key_2)
data_dict[q_key][new_key] = data_dict[q_key][key_1][key_2]
data_dict[q_key].pop(key_1) # removes key after flattening
self.minionqc_data[s_name] = data_dict['all'] # stats for all reads
self.qfilt_data[s_name] = data_dict['q_filt'] # stats for q-filtered reads
self.q_threshold_list.add(q_threshold) | Parses minionqc's 'summary.yaml' report file for results.
Uses only the "All reads" stats. Ignores "Q>=x" part. | Below is the the instruction that describes the task:
### Input:
Parses minionqc's 'summary.yaml' report file for results.
Uses only the "All reads" stats. Ignores "Q>=x" part.
### Response:
def parse_minionqc_report(self, s_name, f):
'''
Parses minionqc's 'summary.yaml' report file for results.
Uses only the "All reads" stats. Ignores "Q>=x" part.
'''
try:
# Parsing as OrderedDict is slightly messier with YAML
# http://stackoverflow.com/a/21048064/713980
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, dict_constructor)
summary_dict = yaml.safe_load(f)
except Exception as e:
log.error("Error parsing MinIONQC input file: {}".format(f))
return
# Do a deep copy as dicts are mutable
self.minionqc_raw_data[s_name] = copy.deepcopy(summary_dict)
# get q value threshold used for reads
q_threshold = None
for k in summary_dict.keys():
if k.startswith('Q>='):
q_threshold = k
data_dict = {}
data_dict['all'] = summary_dict['All reads'] # all reads
data_dict['q_filt'] = summary_dict[q_threshold] # quality filtered reads
for q_key in ['all', 'q_filt']:
for key_1 in ['reads', 'gigabases']:
for key_2 in data_dict[q_key][key_1]:
new_key = '{} {}'.format(key_1, key_2)
data_dict[q_key][new_key] = data_dict[q_key][key_1][key_2]
data_dict[q_key].pop(key_1) # removes key after flattening
self.minionqc_data[s_name] = data_dict['all'] # stats for all reads
self.qfilt_data[s_name] = data_dict['q_filt'] # stats for q-filtered reads
self.q_threshold_list.add(q_threshold) |
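A toy illustration of the flattening step above, outside the MultiQC module and with an invented stats fragment standing in for MinIONQC's summary.yaml content:
import copy

stats = {'reads': {'total': 1000, '>10kb': 42}, 'gigabases': {'total': 1.5}}  # invented values
flat = copy.deepcopy(stats)
for key_1 in ['reads', 'gigabases']:
    for key_2 in flat[key_1]:
        flat['{} {}'.format(key_1, key_2)] = flat[key_1][key_2]
    flat.pop(key_1)
print(flat)   # {'reads total': 1000, 'reads >10kb': 42, 'gigabases total': 1.5}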
def isSane(self):
"""
        This method checks to see if a trust root represents a
reasonable (sane) set of URLs. 'http://*.com/', for example
is not a reasonable pattern, as it cannot meaningfully specify
the site claiming it. This function attempts to find many
related examples, but it can only work via heuristics.
Negative responses from this method should be treated as
advisory, used only to alert the user to examine the trust
root carefully.
@return: Whether the trust root is sane
@rtype: C{bool}
"""
if self.host == 'localhost':
return True
host_parts = self.host.split('.')
if self.wildcard:
assert host_parts[0] == '', host_parts
del host_parts[0]
# If it's an absolute domain name, remove the empty string
# from the end.
if host_parts and not host_parts[-1]:
del host_parts[-1]
if not host_parts:
return False
# Do not allow adjacent dots
if '' in host_parts:
return False
tld = host_parts[-1]
if tld not in _top_level_domains:
return False
if len(host_parts) == 1:
return False
if self.wildcard:
if len(tld) == 2 and len(host_parts[-2]) <= 3:
# It's a 2-letter tld with a short second to last segment
# so there needs to be more than two segments specified
# (e.g. *.co.uk is insane)
return len(host_parts) > 2
# Passed all tests for insanity.
        return True | This method checks to see if a trust root represents a
reasonable (sane) set of URLs. 'http://*.com/', for example
is not a reasonable pattern, as it cannot meaningfully specify
the site claiming it. This function attempts to find many
related examples, but it can only work via heuristics.
Negative responses from this method should be treated as
advisory, used only to alert the user to examine the trust
root carefully.
@return: Whether the trust root is sane
@rtype: C{bool} | Below is the the instruction that describes the task:
### Input:
This method checks to see if a trust root represents a
reasonable (sane) set of URLs. 'http://*.com/', for example
is not a reasonable pattern, as it cannot meaningfully specify
the site claiming it. This function attempts to find many
related examples, but it can only work via heuristics.
Negative responses from this method should be treated as
advisory, used only to alert the user to examine the trust
root carefully.
@return: Whether the trust root is sane
@rtype: C{bool}
### Response:
def isSane(self):
"""
        This method checks to see if a trust root represents a
reasonable (sane) set of URLs. 'http://*.com/', for example
is not a reasonable pattern, as it cannot meaningfully specify
the site claiming it. This function attempts to find many
related examples, but it can only work via heuristics.
Negative responses from this method should be treated as
advisory, used only to alert the user to examine the trust
root carefully.
@return: Whether the trust root is sane
@rtype: C{bool}
"""
if self.host == 'localhost':
return True
host_parts = self.host.split('.')
if self.wildcard:
assert host_parts[0] == '', host_parts
del host_parts[0]
# If it's an absolute domain name, remove the empty string
# from the end.
if host_parts and not host_parts[-1]:
del host_parts[-1]
if not host_parts:
return False
# Do not allow adjacent dots
if '' in host_parts:
return False
tld = host_parts[-1]
if tld not in _top_level_domains:
return False
if len(host_parts) == 1:
return False
if self.wildcard:
if len(tld) == 2 and len(host_parts[-2]) <= 3:
# It's a 2-letter tld with a short second to last segment
# so there needs to be more than two segments specified
# (e.g. *.co.uk is insane)
return len(host_parts) > 2
# Passed all tests for insanity.
return True |
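A hedged usage sketch of the heuristics above, assuming the method lives on python-openid's TrustRoot class and is reached through its parse() constructor (the import path is an assumption):

from openid.server.trustroot import TrustRoot  # assumed location

assert TrustRoot.parse('http://www.example.com/').isSane()
assert TrustRoot.parse('http://*.example.com/').isSane()
assert not TrustRoot.parse('http://*.com/').isSane()     # wildcard spans an entire TLD
assert not TrustRoot.parse('http://*.co.uk/').isSane()   # 2-letter TLD with short second level needs more segments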
def invoice(request, invoice_id, access_code=None):
''' Displays an invoice.
This view is not authenticated, but it will only allow access to either:
the user the invoice belongs to; staff; or a request made with the correct
access code.
Arguments:
invoice_id (castable to int): The invoice_id for the invoice you want
to view.
access_code (Optional[str]): The access code for the user who owns
this invoice.
Returns:
render:
Renders ``registrasion/invoice.html``, with the following
data::
{
"invoice": models.commerce.Invoice(),
}
Raises:
Http404: if the current user cannot view this invoice and the correct
access_code is not provided.
'''
current_invoice = InvoiceController.for_id_or_404(invoice_id)
if not current_invoice.can_view(
user=request.user,
access_code=access_code,
):
raise Http404()
data = {
"invoice": current_invoice.invoice,
}
return render(request, "registrasion/invoice.html", data) | Displays an invoice.
This view is not authenticated, but it will only allow access to either:
the user the invoice belongs to; staff; or a request made with the correct
access code.
Arguments:
invoice_id (castable to int): The invoice_id for the invoice you want
to view.
access_code (Optional[str]): The access code for the user who owns
this invoice.
Returns:
render:
Renders ``registrasion/invoice.html``, with the following
data::
{
"invoice": models.commerce.Invoice(),
}
Raises:
Http404: if the current user cannot view this invoice and the correct
access_code is not provided. | Below is the the instruction that describes the task:
### Input:
Displays an invoice.
This view is not authenticated, but it will only allow access to either:
the user the invoice belongs to; staff; or a request made with the correct
access code.
Arguments:
invoice_id (castable to int): The invoice_id for the invoice you want
to view.
access_code (Optional[str]): The access code for the user who owns
this invoice.
Returns:
render:
Renders ``registrasion/invoice.html``, with the following
data::
{
"invoice": models.commerce.Invoice(),
}
Raises:
Http404: if the current user cannot view this invoice and the correct
access_code is not provided.
### Response:
def invoice(request, invoice_id, access_code=None):
''' Displays an invoice.
This view is not authenticated, but it will only allow access to either:
the user the invoice belongs to; staff; or a request made with the correct
access code.
Arguments:
invoice_id (castable to int): The invoice_id for the invoice you want
to view.
access_code (Optional[str]): The access code for the user who owns
this invoice.
Returns:
render:
Renders ``registrasion/invoice.html``, with the following
data::
{
"invoice": models.commerce.Invoice(),
}
Raises:
Http404: if the current user cannot view this invoice and the correct
access_code is not provided.
'''
current_invoice = InvoiceController.for_id_or_404(invoice_id)
if not current_invoice.can_view(
user=request.user,
access_code=access_code,
):
raise Http404()
data = {
"invoice": current_invoice.invoice,
}
return render(request, "registrasion/invoice.html", data) |
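For orientation, a hedged sketch of how such a view could be wired into a URLconf; the patterns and route names below are illustrative, not taken from registrasion:

from django.urls import path

urlpatterns = [
    path('invoice/<int:invoice_id>/', invoice, name='invoice'),
    path('invoice/<int:invoice_id>/<str:access_code>/', invoice, name='invoice_access_code'),
]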
def get_cache_item(self):
'''Gets the cached item. Raises AttributeError if it hasn't been set.'''
if settings.DEBUG:
raise AttributeError('Caching disabled in DEBUG mode')
return getattr(self.template, self.options['template_cache_key']) | Gets the cached item. Raises AttributeError if it hasn't been set. | Below is the the instruction that describes the task:
### Input:
Gets the cached item. Raises AttributeError if it hasn't been set.
### Response:
def get_cache_item(self):
'''Gets the cached item. Raises AttributeError if it hasn't been set.'''
if settings.DEBUG:
raise AttributeError('Caching disabled in DEBUG mode')
return getattr(self.template, self.options['template_cache_key']) |
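A minimal sketch of the attribute-caching pattern this getter belongs to; the companion setter and the call site are assumptions for illustration only:

def set_cache_item(self, item):
    '''Stores the item on the template object so get_cache_item can find it.'''
    setattr(self.template, self.options['template_cache_key'], item)

# hypothetical call site
try:
    compiled = provider.get_cache_item()
except AttributeError:
    compiled = expensive_compile(provider.template)  # hypothetical expensive step
    provider.set_cache_item(compiled)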
def removeUnreferencedElements(doc, keepDefs):
"""
Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document.
"""
global _num_elements_removed
num = 0
# Remove certain unreferenced elements outside of defs
removeTags = ['linearGradient', 'radialGradient', 'pattern']
identifiedElements = findElementsWithId(doc.documentElement)
referencedIDs = findReferencedElements(doc.documentElement)
for id in identifiedElements:
if id not in referencedIDs:
goner = identifiedElements[id]
if (goner is not None and goner.nodeName in removeTags
and goner.parentNode is not None
and goner.parentNode.tagName != 'defs'):
goner.parentNode.removeChild(goner)
num += 1
_num_elements_removed += 1
if not keepDefs:
# Remove most unreferenced elements inside defs
defs = doc.documentElement.getElementsByTagName('defs')
for aDef in defs:
elemsToRemove = removeUnusedDefs(doc, aDef)
for elem in elemsToRemove:
elem.parentNode.removeChild(elem)
_num_elements_removed += 1
num += 1
return num | Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document. | Below is the the instruction that describes the task:
### Input:
Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document.
### Response:
def removeUnreferencedElements(doc, keepDefs):
"""
Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document.
"""
global _num_elements_removed
num = 0
# Remove certain unreferenced elements outside of defs
removeTags = ['linearGradient', 'radialGradient', 'pattern']
identifiedElements = findElementsWithId(doc.documentElement)
referencedIDs = findReferencedElements(doc.documentElement)
for id in identifiedElements:
if id not in referencedIDs:
goner = identifiedElements[id]
if (goner is not None and goner.nodeName in removeTags
and goner.parentNode is not None
and goner.parentNode.tagName != 'defs'):
goner.parentNode.removeChild(goner)
num += 1
_num_elements_removed += 1
if not keepDefs:
# Remove most unreferenced elements inside defs
defs = doc.documentElement.getElementsByTagName('defs')
for aDef in defs:
elemsToRemove = removeUnusedDefs(doc, aDef)
for elem in elemsToRemove:
elem.parentNode.removeChild(elem)
_num_elements_removed += 1
num += 1
return num |
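A hedged usage sketch, assuming the helpers referenced above (findElementsWithId, findReferencedElements, removeUnusedDefs) are importable module-level siblings as in Scour; the file name is illustrative:

from xml.dom import minidom

doc = minidom.parse('drawing.svg')
removed = removeUnreferencedElements(doc, keepDefs=False)
print('removed {} unreferenced element(s)'.format(removed))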
def power(self, n):
"""The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
"""
if n > 0:
return super().power(n)
return Kraus(SuperOp(self).power(n)) | The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer. | Below is the the instruction that describes the task:
### Input:
The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
### Response:
def power(self, n):
"""The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
"""
if n > 0:
return super().power(n)
return Kraus(SuperOp(self).power(n)) |
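An illustrative sketch only, assuming the Kraus class exposed by qiskit.quantum_info; the bit-flip channel is an example, not from the source:

import numpy as np
from qiskit.quantum_info import Kraus

p = 0.1  # flip probability
bit_flip = Kraus([np.sqrt(1 - p) * np.eye(2),
                  np.sqrt(p) * np.array([[0.0, 1.0], [1.0, 0.0]])])
twice = bit_flip.power(2)  # the channel applied two times in a row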
def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):
"""Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)
:param nfilt: the number of filters in the filterbank, default 20.
:param nfft: the FFT size. Default is 512.
:param samplerate: the sample rate of the signal we are working with, in Hz. Affects mel spacing.
:param lowfreq: lowest band edge of mel filters, default 0 Hz
:param highfreq: highest band edge of mel filters, default samplerate/2
:returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
"""
highfreq= highfreq or samplerate/2
assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2"
# compute points evenly spaced in mels
lowmel = hz2mel(lowfreq)
highmel = hz2mel(highfreq)
melpoints = numpy.linspace(lowmel,highmel,nfilt+2)
# our points are in Hz, but we use fft bins, so we have to convert
# from Hz to fft bin number
bin = numpy.floor((nfft+1)*mel2hz(melpoints)/samplerate)
fbank = numpy.zeros([nfilt,nfft//2+1])
for j in range(0,nfilt):
for i in range(int(bin[j]), int(bin[j+1])):
fbank[j,i] = (i - bin[j]) / (bin[j+1]-bin[j])
for i in range(int(bin[j+1]), int(bin[j+2])):
fbank[j,i] = (bin[j+2]-i) / (bin[j+2]-bin[j+1])
return fbank | Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)
:param nfilt: the number of filters in the filterbank, default 20.
:param nfft: the FFT size. Default is 512.
:param samplerate: the sample rate of the signal we are working with, in Hz. Affects mel spacing.
:param lowfreq: lowest band edge of mel filters, default 0 Hz
:param highfreq: highest band edge of mel filters, default samplerate/2
:returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter. | Below is the the instruction that describes the task:
### Input:
Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)
:param nfilt: the number of filters in the filterbank, default 20.
:param nfft: the FFT size. Default is 512.
:param samplerate: the sample rate of the signal we are working with, in Hz. Affects mel spacing.
:param lowfreq: lowest band edge of mel filters, default 0 Hz
:param highfreq: highest band edge of mel filters, default samplerate/2
:returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
### Response:
def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):
"""Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)
:param nfilt: the number of filters in the filterbank, default 20.
:param nfft: the FFT size. Default is 512.
:param samplerate: the sample rate of the signal we are working with, in Hz. Affects mel spacing.
:param lowfreq: lowest band edge of mel filters, default 0 Hz
:param highfreq: highest band edge of mel filters, default samplerate/2
:returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
"""
highfreq= highfreq or samplerate/2
assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2"
# compute points evenly spaced in mels
lowmel = hz2mel(lowfreq)
highmel = hz2mel(highfreq)
melpoints = numpy.linspace(lowmel,highmel,nfilt+2)
# our points are in Hz, but we use fft bins, so we have to convert
# from Hz to fft bin number
bin = numpy.floor((nfft+1)*mel2hz(melpoints)/samplerate)
fbank = numpy.zeros([nfilt,nfft//2+1])
for j in range(0,nfilt):
for i in range(int(bin[j]), int(bin[j+1])):
fbank[j,i] = (i - bin[j]) / (bin[j+1]-bin[j])
for i in range(int(bin[j+1]), int(bin[j+2])):
fbank[j,i] = (bin[j+2]-i) / (bin[j+2]-bin[j+1])
return fbank |
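Example call with illustrative parameter values; it assumes the module-level hz2mel and mel2hz helpers are available alongside the function, as in python_speech_features:

fbank = get_filterbanks(nfilt=26, nfft=512, samplerate=16000, lowfreq=0, highfreq=8000)
print(fbank.shape)  # (26, 257): one row per triangular mel filter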
def _can_connect(host, port=22): # type: (str, int) -> bool
"""Checks if the connection to provided ``host`` and ``port`` is possible or not.
Args:
host (str): Hostname for the host to check connection.
port (int): Port name of the host to check connection on.
"""
try:
logger.debug('Testing connection to host %s', host)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host,
port=port)
client.close()
logger.info('Can connect to host %s', host)
return True
except Exception as e:
logger.info('Cannot connect to host %s', host)
logger.info('Connection failed with exception: \n %s', str(e))
return False | Checks if the connection to provided ``host`` and ``port`` is possible or not.
Args:
host (str): Hostname for the host to check connection.
port (int): Port name of the host to check connection on. | Below is the the instruction that describes the task:
### Input:
Checks if the connection to provided ``host`` and ``port`` is possible or not.
Args:
host (str): Hostname for the host to check connection.
port (int): Port name of the host to check connection on.
### Response:
def _can_connect(host, port=22): # type: (str, int) -> bool
"""Checks if the connection to provided ``host`` and ``port`` is possible or not.
Args:
host (str): Hostname for the host to check connection.
port (int): Port name of the host to check connection on.
"""
try:
logger.debug('Testing connection to host %s', host)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host,
port=port)
client.close()
logger.info('Can connect to host %s', host)
return True
except Exception as e:
logger.info('Cannot connect to host %s', host)
logger.info('Connection failed with exception: \n %s', str(e))
return False |
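A hedged sketch of the polling loop this kind of check typically supports; host names and the retry interval are illustrative:

import time

hosts = ['algo-1', 'algo-2']
while not all(_can_connect(host, 22) for host in hosts):
    time.sleep(5)  # retry until every worker accepts SSH connections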
def auto_constraints(self, component=None):
"""
        Use CLDF reference properties to implicitly create foreign key constraints.
:param component: A Table object or `None`.
"""
if not component:
for table in self.tables:
self.auto_constraints(table)
return
if not component.tableSchema.primaryKey:
idcol = component.get_column(term_uri('id'))
if idcol:
component.tableSchema.primaryKey = [idcol.name]
self._auto_foreign_keys(component)
try:
table_type = self.get_tabletype(component)
except ValueError:
# New component is not a known CLDF term, so cannot add components
            # automatically. TODO: We might be able to infer some based on
# `xxxReference` column properties?
return
        # auto-add foreign keys targeting the new component:
        for table in self.tables:
            self._auto_foreign_keys(table, component=component, table_type=table_type) | Use CLDF reference properties to implicitly create foreign key constraints.
:param component: A Table object or `None`. | Below is the the instruction that describes the task:
### Input:
Use CLDF reference properties to implicitly create foreign key constraints.
:param component: A Table object or `None`.
### Response:
def auto_constraints(self, component=None):
"""
        Use CLDF reference properties to implicitly create foreign key constraints.
:param component: A Table object or `None`.
"""
if not component:
for table in self.tables:
self.auto_constraints(table)
return
if not component.tableSchema.primaryKey:
idcol = component.get_column(term_uri('id'))
if idcol:
component.tableSchema.primaryKey = [idcol.name]
self._auto_foreign_keys(component)
try:
table_type = self.get_tabletype(component)
except ValueError:
# New component is not a known CLDF term, so cannot add components
            # automatically. TODO: We might be able to infer some based on
# `xxxReference` column properties?
return
        # auto-add foreign keys targeting the new component:
for table in self.tables:
self._auto_foreign_keys(table, component=component, table_type=table_type) |
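An illustrative call, assuming pycldf's Dataset API; the directory name and the added component are examples only:

from pycldf import StructureDataset

ds = StructureDataset.in_dir('my_cldf')
ds.add_component('LanguageTable')
ds.auto_constraints()  # wire up keys for the newly added component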
def value_compare(left, right, ordering=1):
"""
SORT VALUES, NULL IS THE LEAST VALUE
:param left: LHS
:param right: RHS
:param ordering: (-1, 0, 1) TO AFFECT SORT ORDER
:return: The return value is negative if x < y, zero if x == y and strictly positive if x > y.
"""
try:
ltype = left.__class__
rtype = right.__class__
if ltype in list_types or rtype in list_types:
if left == None:
return ordering
elif right == None:
return - ordering
left = listwrap(left)
right = listwrap(right)
for a, b in zip(left, right):
c = value_compare(a, b) * ordering
if c != 0:
return c
if len(left) < len(right):
return - ordering
elif len(left) > len(right):
return ordering
else:
return 0
if ltype is float and isnan(left):
left = None
ltype = none_type
if rtype is float and isnan(right):
right = None
rtype = none_type
null_order = ordering*10
ltype_num = TYPE_ORDER.get(ltype, null_order)
rtype_num = TYPE_ORDER.get(rtype, null_order)
type_diff = ltype_num - rtype_num
if type_diff != 0:
return ordering if type_diff > 0 else -ordering
if ltype_num == null_order:
return 0
elif ltype is builtin_tuple:
for a, b in zip(left, right):
c = value_compare(a, b)
if c != 0:
return c * ordering
return 0
elif ltype in data_types:
for k in sorted(set(left.keys()) | set(right.keys())):
c = value_compare(left.get(k), right.get(k)) * ordering
if c != 0:
return c
return 0
elif left > right:
return ordering
elif left < right:
return -ordering
else:
return 0
except Exception as e:
Log.error("Can not compare values {{left}} to {{right}}", left=left, right=right, cause=e) | SORT VALUES, NULL IS THE LEAST VALUE
:param left: LHS
:param right: RHS
:param ordering: (-1, 0, 1) TO AFFECT SORT ORDER
:return: The return value is negative if x < y, zero if x == y and strictly positive if x > y. | Below is the the instruction that describes the task:
### Input:
SORT VALUES, NULL IS THE LEAST VALUE
:param left: LHS
:param right: RHS
:param ordering: (-1, 0, 1) TO AFFECT SORT ORDER
:return: The return value is negative if x < y, zero if x == y and strictly positive if x > y.
### Response:
def value_compare(left, right, ordering=1):
"""
SORT VALUES, NULL IS THE LEAST VALUE
:param left: LHS
:param right: RHS
:param ordering: (-1, 0, 1) TO AFFECT SORT ORDER
:return: The return value is negative if x < y, zero if x == y and strictly positive if x > y.
"""
try:
ltype = left.__class__
rtype = right.__class__
if ltype in list_types or rtype in list_types:
if left == None:
return ordering
elif right == None:
return - ordering
left = listwrap(left)
right = listwrap(right)
for a, b in zip(left, right):
c = value_compare(a, b) * ordering
if c != 0:
return c
if len(left) < len(right):
return - ordering
elif len(left) > len(right):
return ordering
else:
return 0
if ltype is float and isnan(left):
left = None
ltype = none_type
if rtype is float and isnan(right):
right = None
rtype = none_type
null_order = ordering*10
ltype_num = TYPE_ORDER.get(ltype, null_order)
rtype_num = TYPE_ORDER.get(rtype, null_order)
type_diff = ltype_num - rtype_num
if type_diff != 0:
return ordering if type_diff > 0 else -ordering
if ltype_num == null_order:
return 0
elif ltype is builtin_tuple:
for a, b in zip(left, right):
c = value_compare(a, b)
if c != 0:
return c * ordering
return 0
elif ltype in data_types:
for k in sorted(set(left.keys()) | set(right.keys())):
c = value_compare(left.get(k), right.get(k)) * ordering
if c != 0:
return c
return 0
elif left > right:
return ordering
elif left < right:
return -ordering
else:
return 0
except Exception as e:
Log.error("Can not compare values {{left}} to {{right}}", left=left, right=right, cause=e) |
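Usage sketch: value_compare is a cmp-style comparator, so it can drive sorted() through functools.cmp_to_key; the sample values are illustrative:

from functools import cmp_to_key

values = [3, None, 'a', 1.5, [2, 1]]
ascending = sorted(values, key=cmp_to_key(value_compare))
descending = sorted(values, key=cmp_to_key(lambda a, b: value_compare(a, b, -1)))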
def _get_audio_channels(self, audio_abs_path):
"""
Parameters
----------
audio_abs_path : str
Returns
-------
channel_num : int
"""
channel_num = int(
subprocess.check_output(
("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'"""
).format(audio_abs_path, "Channels"),
shell=True, universal_newlines=True).rstrip())
return channel_num | Parameters
----------
audio_abs_path : str
Returns
-------
channel_num : int | Below is the the instruction that describes the task:
### Input:
Parameters
----------
audio_abs_path : str
Returns
-------
channel_num : int
### Response:
def _get_audio_channels(self, audio_abs_path):
"""
Parameters
----------
audio_abs_path : str
Returns
-------
channel_num : int
"""
channel_num = int(
subprocess.check_output(
("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'"""
).format(audio_abs_path, "Channels"),
shell=True, universal_newlines=True).rstrip())
return channel_num |
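For reference, the same channel count can be read without the grep/awk pipeline; a hedged equivalent using sox's soxi front end, with an illustrative file name:

import subprocess

out = subprocess.check_output(['soxi', '-c', 'recording.wav'], universal_newlines=True)
channel_num = int(out.strip())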
def ctx() -> moderngl.Context:
"""ModernGL context"""
win = window()
if not win.ctx:
raise RuntimeError("Attempting to get context before creation")
return win.ctx | ModernGL context | Below is the the instruction that describes the task:
### Input:
ModernGL context
### Response:
def ctx() -> moderngl.Context:
"""ModernGL context"""
win = window()
if not win.ctx:
raise RuntimeError("Attempting to get context before creation")
return win.ctx |
def get_qpimage(self, idx):
"""Return background-corrected QPImage of data at index `idx`"""
if self._bgdata:
# The user has explicitly chosen different background data
# using `get_qpimage_raw`.
qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx)
else:
# We can use the background data stored in the qpimage hdf5 file
with self._qpseries() as qps:
qpi = qps.get_qpimage(index=idx).copy()
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return background-corrected QPImage of data at index `idx` | Below is the the instruction that describes the task:
### Input:
Return background-corrected QPImage of data at index `idx`
### Response:
def get_qpimage(self, idx):
"""Return background-corrected QPImage of data at index `idx`"""
if self._bgdata:
# The user has explicitly chosen different background data
# using `get_qpimage_raw`.
qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx)
else:
# We can use the background data stored in the qpimage hdf5 file
with self._qpseries() as qps:
qpi = qps.get_qpimage(index=idx).copy()
# Force meta data
for key in self.meta_data:
qpi[key] = self.meta_data[key]
# set identifier
qpi["identifier"] = self.get_identifier(idx)
return qpi |