code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group size of each group.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self | Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group size of each group.
Returns
-------
self : Dataset
Dataset with set group. | Below is the instruction that describes the task:
### Input:
Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group size of each group.
Returns
-------
self : Dataset
Dataset with set group.
### Response:
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group size of each group.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self |
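The row above is LightGBM's Dataset.set_group. As a quick illustration of how the group sizes are meant to line up with the rows, here is a minimal usage sketch (the data and group sizes are made up for the example):
import numpy as np
import lightgbm as lgb

# hypothetical learning-to-rank data: 60 rows split across three queries
X = np.random.rand(60, 5)
y = np.random.randint(0, 2, size=60)

train_set = lgb.Dataset(X, label=y)
# the group sizes must sum to the number of rows (10 + 20 + 30 == 60)
train_set.set_group([10, 20, 30])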
def delimiter_groups(line, begin_delim=begin_delim,
end_delim=end_delim):
"""Split a line into alternating groups.
The first group cannot have a line feed inserted,
the next one can, etc.
"""
text = []
line = iter(line)
while True:
# First build and yield an unsplittable group
for item in line:
text.append(item)
if item in begin_delim:
break
if not text:
break
yield text
# Now build and yield a splittable group
level = 0
text = []
for item in line:
if item in begin_delim:
level += 1
elif item in end_delim:
level -= 1
if level < 0:
yield text
text = [item]
break
text.append(item)
else:
assert not text, text
break | Split a line into alternating groups.
The first group cannot have a line feed inserted,
the next one can, etc. | Below is the instruction that describes the task:
### Input:
Split a line into alternating groups.
The first group cannot have a line feed inserted,
the next one can, etc.
### Response:
def delimiter_groups(line, begin_delim=begin_delim,
end_delim=end_delim):
"""Split a line into alternating groups.
The first group cannot have a line feed inserted,
the next one can, etc.
"""
text = []
line = iter(line)
while True:
# First build and yield an unsplittable group
for item in line:
text.append(item)
if item in begin_delim:
break
if not text:
break
yield text
# Now build and yield a splittable group
level = 0
text = []
for item in line:
if item in begin_delim:
level += 1
elif item in end_delim:
level -= 1
if level < 0:
yield text
text = [item]
break
text.append(item)
else:
assert not text, text
break |
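An illustrative call for the generator above, assuming begin_delim and end_delim default to the usual bracket sets (those defaults are not shown in the snippet):
begin_delim = set('([{')
end_delim = set(')]}')

tokens = ['f', '(', 'a', ',', ' ', 'b', ')', ' ', '+', ' ', 'c']
print(list(delimiter_groups(tokens, begin_delim=begin_delim, end_delim=end_delim)))
# [['f', '('], ['a', ',', ' ', 'b'], [')', ' ', '+', ' ', 'c']]
# the first group ends at the opening delimiter and cannot be split;
# the second is the splittable span inside the parentheses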
def StartObject(self, numfields):
"""StartObject initializes bookkeeping for writing a new object."""
self.assertNotNested()
# use 32-bit offsets so that arithmetic doesn't overflow.
self.current_vtable = [0 for _ in range_func(numfields)]
self.objectEnd = self.Offset()
self.nested = True | StartObject initializes bookkeeping for writing a new object. | Below is the instruction that describes the task:
### Input:
StartObject initializes bookkeeping for writing a new object.
### Response:
def StartObject(self, numfields):
"""StartObject initializes bookkeeping for writing a new object."""
self.assertNotNested()
# use 32-bit offsets so that arithmetic doesn't overflow.
self.current_vtable = [0 for _ in range_func(numfields)]
self.objectEnd = self.Offset()
self.nested = True |
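StartObject is the low-level entry point of the FlatBuffers Python Builder; generated code normally calls it for you, but a hand-rolled sketch looks roughly like this:
import flatbuffers

builder = flatbuffers.Builder(0)
builder.StartObject(2)                 # table with two vtable slots
builder.PrependInt32Slot(0, 42, 0)     # slot 0, value 42, default 0
builder.PrependInt32Slot(1, 7, 0)      # slot 1, value 7, default 0
table = builder.EndObject()
builder.Finish(table)
buf = builder.Output()                 # finished flatbuffer bytes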
def copy_config_input_with_inactive(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
copy_config = ET.Element("copy_config")
config = copy_config
input = ET.SubElement(copy_config, "input")
with_inactive = ET.SubElement(input, "with-inactive", xmlns="http://tail-f.com/ns/netconf/inactive/1.0")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def copy_config_input_with_inactive(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
copy_config = ET.Element("copy_config")
config = copy_config
input = ET.SubElement(copy_config, "input")
with_inactive = ET.SubElement(input, "with-inactive", xmlns="http://tail-f.com/ns/netconf/inactive/1.0")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerReference=None,
Comment='', PrivateZone=False, DelegationSetId=None,
region=None, key=None, keyid=None, profile=None):
'''
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
Name
The name of the domain. This should be a fully-specified domain, and should terminate with
a period. This is the name you have registered with your DNS registrar. It is also the name
you will delegate from your registrar to the Amazon Route 53 delegation servers returned in
response to this request.
VPCId
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName. Ignored if passed for a non-private zone.
VPCName
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId. Ignored if passed for a non-private zone.
VPCRegion
When creating a private hosted zone, the region of the associated VPC is required. If not
provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for this option. Ignored if passed for
a non-private zone.
CallerReference
A unique string that identifies the request and that allows create_hosted_zone() calls to be
retried without the risk of executing the operation twice. This is a required parameter
when creating new Hosted Zones. Maximum length of 128.
Comment
Any comments you want to include about the hosted zone.
PrivateZone
Boolean - Set to True if creating a private hosted zone.
DelegationSetId
If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon
Route 53 assigned to the reusable delegation set when you created it. Note that XXX TODO
create_delegation_set() is not yet implemented, so you'd need to manually create any
delegation sets before utilizing this.
region
Region endpoint to connect to.
key
AWS key to bind with.
keyid
AWS keyid to bind with.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example::
salt myminion boto3_route53.create_hosted_zone example.org.
'''
if not Name.endswith('.'):
raise SaltInvocationError('Domain must be fully-qualified, complete with trailing period.')
Name = aws_encode(Name)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone,
region=region, key=key, keyid=keyid, profile=profile)
if deets:
log.info(
'Route 53 hosted zone %s already exists. You may want to pass '
'e.g. \'PrivateZone=True\' or similar...', Name
)
return None
args = {
'Name': Name,
'CallerReference': CallerReference,
'HostedZoneConfig': {
'Comment': Comment,
'PrivateZone': PrivateZone
}
}
args.update({'DelegationSetId': DelegationSetId}) if DelegationSetId else None
if PrivateZone:
if not _exactly_one((VPCName, VPCId)):
raise SaltInvocationError('Either VPCName or VPCId is required when creating a '
'private zone.')
vpcs = __salt__['boto_vpc.describe_vpcs'](
vpc_id=VPCId, name=VPCName, region=region, key=key,
keyid=keyid, profile=profile).get('vpcs', [])
if VPCRegion and vpcs:
vpcs = [v for v in vpcs if v['region'] == VPCRegion]
if not vpcs:
log.error('Private zone requested but no VPC matching given criteria found.')
return None
if len(vpcs) > 1:
log.error(
'Private zone requested but multiple VPCs matching given '
'criteria found: %s.', [v['id'] for v in vpcs]
)
return None
vpc = vpcs[0]
if VPCName:
VPCId = vpc['id']
if not VPCRegion:
VPCRegion = vpc['region']
args.update({'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
else:
if any((VPCId, VPCName, VPCRegion)):
log.info('Options VPCId, VPCName, and VPCRegion are ignored when creating '
'non-private zones.')
tries = 10
while tries:
try:
r = conn.create_hosted_zone(**args)
r.pop('ResponseMetadata', None)
if _wait_for_sync(r['ChangeInfo']['Id'], conn):
return [r]
return []
except ClientError as e:
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
tries -= 1
continue
log.error('Failed to create hosted zone %s: %s', Name, e)
return []
return [] | Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
Name
The name of the domain. This should be a fully-specified domain, and should terminate with
a period. This is the name you have registered with your DNS registrar. It is also the name
you will delegate from your registrar to the Amazon Route 53 delegation servers returned in
response to this request.
VPCId
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName. Ignored if passed for a non-private zone.
VPCName
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId. Ignored if passed for a non-private zone.
VPCRegion
When creating a private hosted zone, the region of the associated VPC is required. If not
provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for this option. Ignored if passed for
a non-private zone.
CallerReference
A unique string that identifies the request and that allows create_hosted_zone() calls to be
retried without the risk of executing the operation twice. This is a required parameter
when creating new Hosted Zones. Maximum length of 128.
Comment
Any comments you want to include about the hosted zone.
PrivateZone
Boolean - Set to True if creating a private hosted zone.
DelegationSetId
If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon
Route 53 assigned to the reusable delegation set when you created it. Note that XXX TODO
create_delegation_set() is not yet implemented, so you'd need to manually create any
delegation sets before utilizing this.
region
Region endpoint to connect to.
key
AWS key to bind with.
keyid
AWS keyid to bind with.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example::
salt myminion boto3_route53.create_hosted_zone example.org. | Below is the instruction that describes the task:
### Input:
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
Name
The name of the domain. This should be a fully-specified domain, and should terminate with
a period. This is the name you have registered with your DNS registrar. It is also the name
you will delegate from your registrar to the Amazon Route 53 delegation servers returned in
response to this request.
VPCId
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName. Ignored if passed for a non-private zone.
VPCName
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId. Ignored if passed for a non-private zone.
VPCRegion
When creating a private hosted zone, the region of the associated VPC is required. If not
provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for this option. Ignored if passed for
a non-private zone.
CallerReference
A unique string that identifies the request and that allows create_hosted_zone() calls to be
retried without the risk of executing the operation twice. This is a required parameter
when creating new Hosted Zones. Maximum length of 128.
Comment
Any comments you want to include about the hosted zone.
PrivateZone
Boolean - Set to True if creating a private hosted zone.
DelegationSetId
If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon
Route 53 assigned to the reusable delegation set when you created it. Note that XXX TODO
create_delegation_set() is not yet implemented, so you'd need to manually create any
delegation sets before utilizing this.
region
Region endpoint to connect to.
key
AWS key to bind with.
keyid
AWS keyid to bind with.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example::
salt myminion boto3_route53.create_hosted_zone example.org.
### Response:
def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerReference=None,
Comment='', PrivateZone=False, DelegationSetId=None,
region=None, key=None, keyid=None, profile=None):
'''
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
Name
The name of the domain. This should be a fully-specified domain, and should terminate with
a period. This is the name you have registered with your DNS registrar. It is also the name
you will delegate from your registrar to the Amazon Route 53 delegation servers returned in
response to this request.
VPCId
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName. Ignored if passed for a non-private zone.
VPCName
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId. Ignored if passed for a non-private zone.
VPCRegion
When creating a private hosted zone, the region of the associated VPC is required. If not
provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for this option. Ignored if passed for
a non-private zone.
CallerReference
A unique string that identifies the request and that allows create_hosted_zone() calls to be
retried without the risk of executing the operation twice. This is a required parameter
when creating new Hosted Zones. Maximum length of 128.
Comment
Any comments you want to include about the hosted zone.
PrivateZone
Boolean - Set to True if creating a private hosted zone.
DelegationSetId
If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon
Route 53 assigned to the reusable delegation set when you created it. Note that XXX TODO
create_delegation_set() is not yet implemented, so you'd need to manually create any
delegation sets before utilizing this.
region
Region endpoint to connect to.
key
AWS key to bind with.
keyid
AWS keyid to bind with.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example::
salt myminion boto3_route53.create_hosted_zone example.org.
'''
if not Name.endswith('.'):
raise SaltInvocationError('Domain must be fully-qualified, complete with trailing period.')
Name = aws_encode(Name)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone,
region=region, key=key, keyid=keyid, profile=profile)
if deets:
log.info(
'Route 53 hosted zone %s already exists. You may want to pass '
'e.g. \'PrivateZone=True\' or similar...', Name
)
return None
args = {
'Name': Name,
'CallerReference': CallerReference,
'HostedZoneConfig': {
'Comment': Comment,
'PrivateZone': PrivateZone
}
}
args.update({'DelegationSetId': DelegationSetId}) if DelegationSetId else None
if PrivateZone:
if not _exactly_one((VPCName, VPCId)):
raise SaltInvocationError('Either VPCName or VPCId is required when creating a '
'private zone.')
vpcs = __salt__['boto_vpc.describe_vpcs'](
vpc_id=VPCId, name=VPCName, region=region, key=key,
keyid=keyid, profile=profile).get('vpcs', [])
if VPCRegion and vpcs:
vpcs = [v for v in vpcs if v['region'] == VPCRegion]
if not vpcs:
log.error('Private zone requested but no VPC matching given criteria found.')
return None
if len(vpcs) > 1:
log.error(
'Private zone requested but multiple VPCs matching given '
'criteria found: %s.', [v['id'] for v in vpcs]
)
return None
vpc = vpcs[0]
if VPCName:
VPCId = vpc['id']
if not VPCRegion:
VPCRegion = vpc['region']
args.update({'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
else:
if any((VPCId, VPCName, VPCRegion)):
log.info('Options VPCId, VPCName, and VPCRegion are ignored when creating '
'non-private zones.')
tries = 10
while tries:
try:
r = conn.create_hosted_zone(**args)
r.pop('ResponseMetadata', None)
if _wait_for_sync(r['ChangeInfo']['Id'], conn):
return [r]
return []
except ClientError as e:
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
tries -= 1
continue
log.error('Failed to create hosted zone %s: %s', Name, e)
return []
return [] |
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) in types.StringTypes:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option | add_option(Option)
add_option(opt_str, ..., kwarg=val, ...) | Below is the instruction that describes the task:
### Input:
add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
### Response:
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) in types.StringTypes:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option |
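This is optparse's OptionContainer.add_option, so the two accepted call forms in the docstring correspond to everyday usage such as:
from optparse import OptionParser, Option

parser = OptionParser()
# form 1: option strings plus keyword arguments
parser.add_option("-f", "--file", dest="filename", help="write output to FILE")
# form 2: a pre-built Option instance
parser.add_option(Option("-q", "--quiet", action="store_false", dest="verbose", default=True))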
def _setup_file_logger(cls, session: AppSession, args):
'''Set up the file message logger.
A file log handler and with a formatter is added to the root logger.
'''
if not (args.output_file or args.append_output):
return
logger = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.output_file:
filename = args.output_file
mode = 'w'
else:
filename = args.append_output
mode = 'a'
session.file_log_handler = handler = logging.FileHandler(
filename, mode, encoding='utf-8')
handler.setFormatter(formatter)
logger.addHandler(handler)
if args.verbosity == logging.DEBUG:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO) | Set up the file message logger.
A file log handler and with a formatter is added to the root logger. | Below is the instruction that describes the task:
### Input:
Set up the file message logger.
A file log handler and with a formatter is added to the root logger.
### Response:
def _setup_file_logger(cls, session: AppSession, args):
'''Set up the file message logger.
A file log handler and with a formatter is added to the root logger.
'''
if not (args.output_file or args.append_output):
return
logger = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.output_file:
filename = args.output_file
mode = 'w'
else:
filename = args.append_output
mode = 'a'
session.file_log_handler = handler = logging.FileHandler(
filename, mode, encoding='utf-8')
handler.setFormatter(formatter)
logger.addHandler(handler)
if args.verbosity == logging.DEBUG:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO) |
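Stripped of the wpull session bookkeeping, the handler wiring above is plain standard-library logging; the underlying pattern is roughly:
import logging

handler = logging.FileHandler('output.log', 'w', encoding='utf-8')
handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
handler.setLevel(logging.INFO)
logging.getLogger().addHandler(handler)  # attach to the root logger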
def during(rrule, duration=None, timestamp=None, **kwargs):
"""
Check if input timestamp is in rrule+duration period
:param rrule: rrule to check
:type rrule: str or dict
(freq, dtstart, interval, count, wkst, until, bymonth, byminute, etc.)
:param dict duration: time duration from rrule step. Ex:{'minutes': 60}
:param float timestamp: timestamp to check between rrule+duration. If None,
use now
"""
result = False
# if rrule is a string expression
if isinstance(rrule, string_types):
rrule_object = rrule_class.rrulestr(rrule)
else:
rrule_object = rrule_class(**rrule)
# if timestamp is None, use now
if timestamp is None:
timestamp = time()
# get now object
now = datetime.fromtimestamp(timestamp)
# get delta object
duration_delta = now if duration is None else relativedelta(**duration)
# get last date
last_date = rrule_object.before(now, inc=True)
# if a previous date exists
if last_date is not None:
next_date = last_date + duration_delta
# check if now is between last_date and next_date
result = last_date <= now <= next_date
return result | Check if input timestamp is in rrule+duration period
:param rrule: rrule to check
:type rrule: str or dict
(freq, dtstart, interval, count, wkst, until, bymonth, byminute, etc.)
:param dict duration: time duration from rrule step. Ex:{'minutes': 60}
:param float timestamp: timestamp to check between rrule+duration. If None,
use now | Below is the instruction that describes the task:
### Input:
Check if input timestamp is in rrule+duration period
:param rrule: rrule to check
:type rrule: str or dict
(freq, dtstart, interval, count, wkst, until, bymonth, byminute, etc.)
:param dict duration: time duration from rrule step. Ex:{'minutes': 60}
:param float timestamp: timestamp to check between rrule+duration. If None,
use now
### Response:
def during(rrule, duration=None, timestamp=None, **kwargs):
"""
Check if input timestamp is in rrule+duration period
:param rrule: rrule to check
:type rrule: str or dict
(freq, dtstart, interval, count, wkst, until, bymonth, byminute, etc.)
:param dict duration: time duration from rrule step. Ex:{'minutes': 60}
:param float timestamp: timestamp to check between rrule+duration. If None,
use now
"""
result = False
# if rrule is a string expression
if isinstance(rrule, string_types):
rrule_object = rrule_class.rrulestr(rrule)
else:
rrule_object = rrule_class(**rrule)
# if timestamp is None, use now
if timestamp is None:
timestamp = time()
# get now object
now = datetime.fromtimestamp(timestamp)
# get delta object
duration_delta = now if duration is None else relativedelta(**duration)
# get last date
last_date = rrule_object.before(now, inc=True)
# if a previous date exists
if last_date is not None:
next_date = last_date + duration_delta
# check if now is between last_date and next_date
result = last_date <= now <= next_date
return result |
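A hypothetical call, assuming rrule_class is dateutil's rrule (so the dict form maps directly onto its constructor arguments):
from datetime import datetime
from dateutil.rrule import DAILY

# is "now" inside a 60-minute window following a daily occurrence?
during({'freq': DAILY, 'dtstart': datetime(2020, 1, 1)},
       duration={'minutes': 60})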
def disable_contactgroup_svc_notifications(self, contactgroup):
"""Disable service notifications for a contactgroup
Format of the line that triggers function call::
DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to disable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None
"""
for contact_id in contactgroup.get_contacts():
self.disable_contact_svc_notifications(self.daemon.contacts[contact_id]) | Disable service notifications for a contactgroup
Format of the line that triggers function call::
DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to disable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None | Below is the instruction that describes the task:
### Input:
Disable service notifications for a contactgroup
Format of the line that triggers function call::
DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to disable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None
### Response:
def disable_contactgroup_svc_notifications(self, contactgroup):
"""Disable service notifications for a contactgroup
Format of the line that triggers function call::
DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to disable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None
"""
for contact_id in contactgroup.get_contacts():
self.disable_contact_svc_notifications(self.daemon.contacts[contact_id]) |
def _fix_bias(op_name, attrs, num_inputs):
"""A workaround for 'use_bias' attribute since onnx don't provide this attribute,
we have to check the number of inputs to decide it."""
if num_inputs == 3:
attrs['no_bias'] = False
elif num_inputs == 2:
attrs['no_bias'] = True
else:
raise ValueError("Unexpected number of inputs for: {}".format(op_name))
return attrs | A workaround for 'use_bias' attribute since onnx don't provide this attribute,
we have to check the number of inputs to decide it. | Below is the instruction that describes the task:
### Input:
A workaround for 'use_bias' attribute since onnx don't provide this attribute,
we have to check the number of inputs to decide it.
### Response:
def _fix_bias(op_name, attrs, num_inputs):
"""A workaround for 'use_bias' attribute since onnx don't provide this attribute,
we have to check the number of inputs to decide it."""
if num_inputs == 3:
attrs['no_bias'] = False
elif num_inputs == 2:
attrs['no_bias'] = True
else:
raise ValueError("Unexpected number of inputs for: {}".format(op_name))
return attrs |
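Given the converter above, an ONNX Conv node with inputs (data, weight, bias) versus (data, weight) resolves as follows:
_fix_bias('Conv', {}, 3)   # -> {'no_bias': False}, bias input is present
_fix_bias('Conv', {}, 2)   # -> {'no_bias': True}, no bias input
_fix_bias('Conv', {}, 1)   # raises ValueError("Unexpected number of inputs for: Conv")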
def imported_classifiers_package(p: ecore.EPackage):
"""Determines which classifiers have to be imported into given package."""
classes = {c for c in p.eClassifiers if isinstance(c, ecore.EClass)}
references = itertools.chain(*(c.eAllReferences() for c in classes))
references_types = (r.eType for r in references)
imported = {c for c in references_types if getattr(c, 'ePackage', p) is not p}
imported_dict = {}
for classifier in imported:
imported_dict.setdefault(classifier.ePackage, set()).add(classifier)
return imported_dict | Determines which classifiers have to be imported into given package. | Below is the instruction that describes the task:
### Input:
Determines which classifiers have to be imported into given package.
### Response:
def imported_classifiers_package(p: ecore.EPackage):
"""Determines which classifiers have to be imported into given package."""
classes = {c for c in p.eClassifiers if isinstance(c, ecore.EClass)}
references = itertools.chain(*(c.eAllReferences() for c in classes))
references_types = (r.eType for r in references)
imported = {c for c in references_types if getattr(c, 'ePackage', p) is not p}
imported_dict = {}
for classifier in imported:
imported_dict.setdefault(classifier.ePackage, set()).add(classifier)
return imported_dict |
def center_widget_on_screen(widget, screen=None):
"""
Centers given Widget on the screen.
:param widget: Current Widget.
:type widget: QWidget
:param screen: Screen used for centering.
:type screen: int
:return: Definition success.
:rtype: bool
"""
screen = screen and screen or QApplication.desktop().primaryScreen()
desktop_width = QApplication.desktop().screenGeometry(screen).width()
desktop_height = QApplication.desktop().screenGeometry(screen).height()
widget.move(desktop_width / 2 - widget.sizeHint().width() / 2, desktop_height / 2 - widget.sizeHint().height() / 2)
return True | Centers given Widget on the screen.
:param widget: Current Widget.
:type widget: QWidget
:param screen: Screen used for centering.
:type screen: int
:return: Definition success.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Centers given Widget on the screen.
:param widget: Current Widget.
:type widget: QWidget
:param screen: Screen used for centering.
:type screen: int
:return: Definition success.
:rtype: bool
### Response:
def center_widget_on_screen(widget, screen=None):
"""
Centers given Widget on the screen.
:param widget: Current Widget.
:type widget: QWidget
:param screen: Screen used for centering.
:type screen: int
:return: Definition success.
:rtype: bool
"""
screen = screen and screen or QApplication.desktop().primaryScreen()
desktop_width = QApplication.desktop().screenGeometry(screen).width()
desktop_height = QApplication.desktop().screenGeometry(screen).height()
widget.move(desktop_width / 2 - widget.sizeHint().width() / 2, desktop_height / 2 - widget.sizeHint().height() / 2)
return True |
def configure(self):
"""
Configure the Python logging module for this file.
"""
# build a file handler for this file
handler = logging.FileHandler(self.path, delay=True)
# if we got a format string, create a formatter with it
if self._format:
handler.setFormatter(logging.Formatter(self._format))
# if we got a string for the formatter, assume it's the name of a
# formatter in the environment's config
if type(self._formatter) == str:
if self._env and self._env.config.logging.dict_config.formatters[self._formatter]:
d = self._env.config.logging.dict_config.formatters[self._formatter].to_dict()
handler.setFormatter(logging.Formatter(**d))
elif type(self._formatter) == dict:
# if it's a dict it must be the actual formatter params
handler.setFormatter(logging.Formatter(**self._formatter))
# add the file handler to whatever loggers were specified
if len(self._loggers):
for name in self._loggers:
logging.getLogger(name).addHandler(handler)
else:
# none specified, just add it to the root logger
logging.getLogger().addHandler(handler) | Configure the Python logging module for this file. | Below is the instruction that describes the task:
### Input:
Configure the Python logging module for this file.
### Response:
def configure(self):
"""
Configure the Python logging module for this file.
"""
# build a file handler for this file
handler = logging.FileHandler(self.path, delay=True)
# if we got a format string, create a formatter with it
if self._format:
handler.setFormatter(logging.Formatter(self._format))
# if we got a string for the formatter, assume it's the name of a
# formatter in the environment's config
if type(self._formatter) == str:
if self._env and self._env.config.logging.dict_config.formatters[self._formatter]:
d = self._env.config.logging.dict_config.formatters[self._formatter].to_dict()
handler.setFormatter(logging.Formatter(**d))
elif type(self._formatter) == dict:
# if it's a dict it must be the actual formatter params
handler.setFormatter(logging.Formatter(**self._formatter))
# add the file handler to whatever loggers were specified
if len(self._loggers):
for name in self._loggers:
logging.getLogger(name).addHandler(handler)
else:
# none specified, just add it to the root logger
logging.getLogger().addHandler(handler) |
def base(self, *paths, **query_kwargs):
"""create a new url object using the current base path as a base
if you had requested /foo/bar, then this would append *paths and **query_kwargs
to /foo/bar
:example:
# current path: /foo/bar
print url # http://host.com/foo/bar
print url.base() # http://host.com/foo/bar
print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam
:param *paths: list, the paths to append to the current path without query params
:param **query_kwargs: dict, any query string params to add
"""
kwargs = self._normalize_params(*paths, **query_kwargs)
if self.path:
if "path" in kwargs:
paths = self.normalize_paths(self.path, kwargs["path"])
kwargs["path"] = "/".join(paths)
else:
kwargs["path"] = self.path
return self.create(self.root, **kwargs) | create a new url object using the current base path as a base
if you had requested /foo/bar, then this would append *paths and **query_kwargs
to /foo/bar
:example:
# current path: /foo/bar
print url # http://host.com/foo/bar
print url.base() # http://host.com/foo/bar
print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam
:param *paths: list, the paths to append to the current path without query params
:param **query_kwargs: dict, any query string params to add | Below is the instruction that describes the task:
### Input:
create a new url object using the current base path as a base
if you had requested /foo/bar, then this would append *paths and **query_kwargs
to /foo/bar
:example:
# current path: /foo/bar
print url # http://host.com/foo/bar
print url.base() # http://host.com/foo/bar
print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam
:param *paths: list, the paths to append to the current path without query params
:param **query_kwargs: dict, any query string params to add
### Response:
def base(self, *paths, **query_kwargs):
"""create a new url object using the current base path as a base
if you had requested /foo/bar, then this would append *paths and **query_kwargs
to /foo/bar
:example:
# current path: /foo/bar
print url # http://host.com/foo/bar
print url.base() # http://host.com/foo/bar
print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam
:param *paths: list, the paths to append to the current path without query params
:param **query_kwargs: dict, any query string params to add
"""
kwargs = self._normalize_params(*paths, **query_kwargs)
if self.path:
if "path" in kwargs:
paths = self.normalize_paths(self.path, kwargs["path"])
kwargs["path"] = "/".join(paths)
else:
kwargs["path"] = self.path
return self.create(self.root, **kwargs) |
def get_resources_for_api_version(self, prefix, group, version, preferred):
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
resources = defaultdict(list)
subresources = {}
path = '/'.join(filter(None, [prefix, group, version]))
resources_response = load_json(self.client.request('GET', path))['resources']
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
for subresource in subresources_raw:
resource, name = subresource['name'].split('/')
if not subresources.get(resource):
subresources[resource] = {}
subresources[resource][name] = subresource
for resource in resources_raw:
# Prevent duplicate keys
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
resource.pop(key, None)
resourceobj = Resource(
prefix=prefix,
group=group,
api_version=version,
client=self.client,
preferred=preferred,
subresources=subresources.get(resource['name']),
**resource
)
resources[resource['kind']].append(resourceobj)
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'])
resources[resource_list.kind].append(resource_list)
return resources | returns a dictionary of resources associated with provided (prefix, group, version) | Below is the instruction that describes the task:
### Input:
returns a dictionary of resources associated with provided (prefix, group, version)
### Response:
def get_resources_for_api_version(self, prefix, group, version, preferred):
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
resources = defaultdict(list)
subresources = {}
path = '/'.join(filter(None, [prefix, group, version]))
resources_response = load_json(self.client.request('GET', path))['resources']
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
for subresource in subresources_raw:
resource, name = subresource['name'].split('/')
if not subresources.get(resource):
subresources[resource] = {}
subresources[resource][name] = subresource
for resource in resources_raw:
# Prevent duplicate keys
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
resource.pop(key, None)
resourceobj = Resource(
prefix=prefix,
group=group,
api_version=version,
client=self.client,
preferred=preferred,
subresources=subresources.get(resource['name']),
**resource
)
resources[resource['kind']].append(resourceobj)
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'])
resources[resource_list.kind].append(resource_list)
return resources |
def parse(self, plist):
"""Update the builder using the provided `plist`. `plist` can
be either a Packet() or a PacketList().
"""
if not isinstance(plist, PacketList):
plist = PacketList(plist)
for pkt in plist[LLTD]:
if LLTDQueryLargeTlv in pkt:
key = "%s:%s:%d" % (pkt.real_dst, pkt.real_src, pkt.seq)
self.types_offsets[key] = (pkt[LLTDQueryLargeTlv].type,
pkt[LLTDQueryLargeTlv].offset)
elif LLTDQueryLargeTlvResp in pkt:
try:
key = "%s:%s:%d" % (pkt.real_src, pkt.real_dst, pkt.seq)
content, offset = self.types_offsets[key]
except KeyError:
continue
loc = slice(offset, offset + pkt[LLTDQueryLargeTlvResp].len)
key = "%s > %s [%s]" % (
pkt.real_src, pkt.real_dst,
LLTDQueryLargeTlv.fields_desc[0].i2s.get(content, content),
)
data = self.data.setdefault(key, array("B"))
datalen = len(data)
if datalen < loc.stop:
data.extend(array("B", b"\x00" * (loc.stop - datalen)))
data[loc] = array("B", pkt[LLTDQueryLargeTlvResp].value) | Update the builder using the provided `plist`. `plist` can
be either a Packet() or a PacketList(). | Below is the instruction that describes the task:
### Input:
Update the builder using the provided `plist`. `plist` can
be either a Packet() or a PacketList().
### Response:
def parse(self, plist):
"""Update the builder using the provided `plist`. `plist` can
be either a Packet() or a PacketList().
"""
if not isinstance(plist, PacketList):
plist = PacketList(plist)
for pkt in plist[LLTD]:
if LLTDQueryLargeTlv in pkt:
key = "%s:%s:%d" % (pkt.real_dst, pkt.real_src, pkt.seq)
self.types_offsets[key] = (pkt[LLTDQueryLargeTlv].type,
pkt[LLTDQueryLargeTlv].offset)
elif LLTDQueryLargeTlvResp in pkt:
try:
key = "%s:%s:%d" % (pkt.real_src, pkt.real_dst, pkt.seq)
content, offset = self.types_offsets[key]
except KeyError:
continue
loc = slice(offset, offset + pkt[LLTDQueryLargeTlvResp].len)
key = "%s > %s [%s]" % (
pkt.real_src, pkt.real_dst,
LLTDQueryLargeTlv.fields_desc[0].i2s.get(content, content),
)
data = self.data.setdefault(key, array("B"))
datalen = len(data)
if datalen < loc.stop:
data.extend(array("B", b"\x00" * (loc.stop - datalen)))
data[loc] = array("B", pkt[LLTDQueryLargeTlvResp].value) |
def _parse_logging(log_values: dict, service_config: dict):
"""Parse log key.
Args:
log_values (dict): logging configuration values
service_config (dict): Service specification
"""
for log_key, log_value in log_values.items():
if 'driver' in log_key:
service_config['log_driver'] = log_value
if 'options' in log_key:
service_config['log_driver_options'] = log_value | Parse log key.
Args:
log_values (dict): logging configuration values
service_config (dict): Service specification | Below is the instruction that describes the task:
### Input:
Parse log key.
Args:
log_values (dict): logging configuration values
service_config (dict): Service specification
### Response:
def _parse_logging(log_values: dict, service_config: dict):
"""Parse log key.
Args:
log_values (dict): logging configuration values
service_config (dict): Service specification
"""
for log_key, log_value in log_values.items():
if 'driver' in log_key:
service_config['log_driver'] = log_value
if 'options' in log_key:
service_config['log_driver_options'] = log_value |
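A small illustration of the key mapping performed above, using made-up compose-style logging values:
service_config = {}
_parse_logging({'driver': 'json-file',
                'options': {'max-size': '10m', 'max-file': '3'}},
               service_config)
# service_config == {'log_driver': 'json-file',
#                    'log_driver_options': {'max-size': '10m', 'max-file': '3'}}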
def convert_binary_field_to_attachment(env, field_spec):
"""This method converts the 8.0 binary fields to attachments like Odoo 9.0
makes with the new attachment=True attribute. It has to be called on
post-migration script, as there's a call to get the res_name of the
target model, which is not yet loaded on pre-migration.
You need to rename the involved column in pre-migration script if you
don't want to lose your data in the process.
This method also removes after the conversion the source column for
avoiding data duplication.
This is done through Odoo ORM, because there's a lot of logic associated
with guessing MIME type, format and length, file saving in store...
that is doesn't worth to recreate it via SQL as there's not too much
performance problem.
:param env: Odoo environment
:param field_spec: A dictionary with the ORM model name as key, and as
dictionary values a tuple with:
* field name to be converted as attachment as first element.
* SQL column name that contains actual data as second element. If
the second element is None, then the column name is taken
calling `get_legacy_name` method, which is the typical technique.
"""
logger = logging.getLogger('OpenUpgrade')
attachment_model = env['ir.attachment']
for model_name in field_spec:
model = env[model_name]
for field, column in field_spec[model_name]:
if column is None:
column = openupgrade.get_legacy_name(field)
logger.info(
"Converting to attachment field {} from model {} stored in "
"column {}".format(field, model_name, column)
)
last_id = 0
while True:
env.cr.execute(
"""SELECT id, {0} FROM {1} WHERE {0} IS NOT NULL AND id > {2}
ORDER BY id LIMIT 500;
""".format(column, model._table, last_id)
)
rows = env.cr.fetchall()
if not rows:
break
logger.info(
" converting {0} items starting after {1}..."
"".format(len(rows), last_id))
for row in rows:
last_id = row[0]
data = bytes(row[1])
if data and data != 'None':
attachment_model.create({
'name': field,
'res_model': model_name,
'res_field': field,
'res_id': last_id,
'type': 'binary',
'datas': data,
})
# Remove source column for cleaning the room
env.cr.execute("ALTER TABLE {} DROP COLUMN {}".format(
model._table, column,
)) | This method converts the 8.0 binary fields to attachments like Odoo 9.0
makes with the new attachment=True attribute. It has to be called on
post-migration script, as there's a call to get the res_name of the
target model, which is not yet loaded on pre-migration.
You need to rename the involved column in pre-migration script if you
don't want to lose your data in the process.
This method also removes after the conversion the source column for
avoiding data duplication.
This is done through Odoo ORM, because there's a lot of logic associated
with guessing MIME type, format and length, file saving in store...
that is doesn't worth to recreate it via SQL as there's not too much
performance problem.
:param env: Odoo environment
:param field_spec: A dictionary with the ORM model name as key, and as
dictionary values a tuple with:
* field name to be converted as attachment as first element.
* SQL column name that contains actual data as second element. If
the second element is None, then the column name is taken
calling `get_legacy_name` method, which is the typical technique. | Below is the instruction that describes the task:
### Input:
This method converts the 8.0 binary fields to attachments like Odoo 9.0
makes with the new attachment=True attribute. It has to be called on
post-migration script, as there's a call to get the res_name of the
target model, which is not yet loaded on pre-migration.
You need to rename the involved column in pre-migration script if you
don't want to lose your data in the process.
This method also removes after the conversion the source column for
avoiding data duplication.
This is done through Odoo ORM, because there's a lot of logic associated
with guessing MIME type, format and length, file saving in store...
that is doesn't worth to recreate it via SQL as there's not too much
performance problem.
:param env: Odoo environment
:param field_spec: A dictionary with the ORM model name as key, and as
dictionary values a tuple with:
* field name to be converted as attachment as first element.
* SQL column name that contains actual data as second element. If
the second element is None, then the column name is taken
calling `get_legacy_name` method, which is the typical technique.
### Response:
def convert_binary_field_to_attachment(env, field_spec):
"""This method converts the 8.0 binary fields to attachments like Odoo 9.0
makes with the new attachment=True attribute. It has to be called on
post-migration script, as there's a call to get the res_name of the
target model, which is not yet loaded on pre-migration.
You need to rename the involved column in pre-migration script if you
don't want to lose your data in the process.
This method also removes after the conversion the source column for
avoiding data duplication.
This is done through Odoo ORM, because there's a lot of logic associated
with guessing MIME type, format and length, file saving in store...
that is doesn't worth to recreate it via SQL as there's not too much
performance problem.
:param env: Odoo environment
:param field_spec: A dictionary with the ORM model name as key, and as
dictionary values a tuple with:
* field name to be converted as attachment as first element.
* SQL column name that contains actual data as second element. If
the second element is None, then the column name is taken
calling `get_legacy_name` method, which is the typical technique.
"""
logger = logging.getLogger('OpenUpgrade')
attachment_model = env['ir.attachment']
for model_name in field_spec:
model = env[model_name]
for field, column in field_spec[model_name]:
if column is None:
column = openupgrade.get_legacy_name(field)
logger.info(
"Converting to attachment field {} from model {} stored in "
"column {}".format(field, model_name, column)
)
last_id = 0
while True:
env.cr.execute(
"""SELECT id, {0} FROM {1} WHERE {0} IS NOT NULL AND id > {2}
ORDER BY id LIMIT 500;
""".format(column, model._table, last_id)
)
rows = env.cr.fetchall()
if not rows:
break
logger.info(
" converting {0} items starting after {1}..."
"".format(len(rows), last_id))
for row in rows:
last_id = row[0]
data = bytes(row[1])
if data and data != 'None':
attachment_model.create({
'name': field,
'res_model': model_name,
'res_field': field,
'res_id': last_id,
'type': 'binary',
'datas': data,
})
# Remove source column for cleaning the room
env.cr.execute("ALTER TABLE {} DROP COLUMN {}".format(
model._table, column,
)) |
def _list_directories_and_files(self, share_name, directory_name=None,
marker=None, max_results=None, timeout=None):
'''
Returns a list of the directories and files under the specified share.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str marker:
A string value that identifies the portion of the list
to be returned with the next list operation. The operation returns
a next_marker value within the response body if the list returned was
not complete. The marker value may then be used in a subsequent
call to request the next set of list items. The marker value is
opaque to the client.
:param int max_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
max_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting max_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('share_name', share_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(share_name, directory_name)
request.query = [
('restype', 'directory'),
('comp', 'list'),
('marker', _to_str(marker)),
('maxresults', _int_to_str(max_results)),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_directories_and_files(response) | Returns a list of the directories and files under the specified share.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str marker:
A string value that identifies the portion of the list
to be returned with the next list operation. The operation returns
a next_marker value within the response body if the list returned was
not complete. The marker value may then be used in a subsequent
call to request the next set of list items. The marker value is
opaque to the client.
:param int max_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
max_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting max_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param int timeout:
The timeout parameter is expressed in seconds. | Below is the instruction that describes the task:
### Input:
Returns a list of the directories and files under the specified share.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str marker:
A string value that identifies the portion of the list
to be returned with the next list operation. The operation returns
a next_marker value within the response body if the list returned was
not complete. The marker value may then be used in a subsequent
call to request the next set of list items. The marker value is
opaque to the client.
:param int max_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
max_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting max_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param int timeout:
The timeout parameter is expressed in seconds.
### Response:
def _list_directories_and_files(self, share_name, directory_name=None,
marker=None, max_results=None, timeout=None):
'''
Returns a list of the directories and files under the specified share.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str marker:
A string value that identifies the portion of the list
to be returned with the next list operation. The operation returns
a next_marker value within the response body if the list returned was
not complete. The marker value may then be used in a subsequent
call to request the next set of list items. The marker value is
opaque to the client.
:param int max_results:
Specifies the maximum number of files to return,
including all directory elements. If the request does not specify
max_results or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting max_results to a value less than
or equal to zero results in error response code 400 (Bad Request).
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('share_name', share_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(share_name, directory_name)
request.query = [
('restype', 'directory'),
('comp', 'list'),
('marker', _to_str(marker)),
('maxresults', _int_to_str(max_results)),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_directories_and_files(response) |
def status(self):
"""
[str] Instrument status. ’Active’ - listed and trading normally, ‘Delisted’ - delisted, ‘TemporarySuspended’ - trading temporarily suspended,
‘PreIPO’ - in the offering and placement period, ‘FailIPO’ - offering failed (stocks only)
"""
try:
return self.__dict__["status"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'status' ".format(self.order_book_id)
) | [str] Instrument status. ’Active’ - listed and trading normally, ‘Delisted’ - delisted, ‘TemporarySuspended’ - trading temporarily suspended,
‘PreIPO’ - in the offering and placement period, ‘FailIPO’ - offering failed (stocks only) | Below is the instruction that describes the task:
### Input:
[str] Instrument status. ’Active’ - listed and trading normally, ‘Delisted’ - delisted, ‘TemporarySuspended’ - trading temporarily suspended,
‘PreIPO’ - in the offering and placement period, ‘FailIPO’ - offering failed (stocks only)
### Response:
def status(self):
"""
[str] Instrument status. ’Active’ - listed and trading normally, ‘Delisted’ - delisted, ‘TemporarySuspended’ - trading temporarily suspended,
‘PreIPO’ - in the offering and placement period, ‘FailIPO’ - offering failed (stocks only)
"""
try:
return self.__dict__["status"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'status' ".format(self.order_book_id)
) |
def write_to_csv(self, filename=None, extension='.dat', overwrite=False,
stride=1, chunksize=None, **kw):
""" write all data to csv with numpy.savetxt
Parameters
----------
filename : str, optional
filename string, which may contain placeholders {itraj} and {stride}:
* itraj will be replaced by trajetory index
* stride is stride argument of this method
If filename is not given, it is being tried to obtain the filenames
from the data source of this iterator.
extension : str, optional, default='.dat'
filename extension of created files
overwrite : bool, optional, default=False
shall existing files be overwritten? If a file exists, this method will raise.
stride : int
omit every n'th frame
chunksize: int, default=None
how many frames to process at once
kw : dict, optional
named arguments passed into numpy.savetxt (header, seperator etc.)
Example
-------
Assume you want to save features calculated by some FeatureReader to ASCII:
>>> import numpy as np, pyemma
>>> import os
>>> from pyemma.util.files import TemporaryDirectory
>>> from pyemma.util.contexts import settings
>>> data = [np.random.random((10,3))] * 3
>>> reader = pyemma.coordinates.source(data)
>>> filename = "distances_{itraj}.dat"
>>> with TemporaryDirectory() as td, settings(show_progress_bars=False):
... out = os.path.join(td, filename)
... reader.write_to_csv(out, header='', delimiter=';')
... print(sorted(os.listdir(td)))
['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
"""
import os
if not filename:
assert hasattr(self, 'filenames')
# raise RuntimeError("could not determine filenames")
filenames = []
for f in self.filenames:
base, _ = os.path.splitext(f)
filenames.append(base + extension)
elif isinstance(filename, str):
filename = filename.replace('{stride}', str(stride))
filenames = [filename.replace('{itraj}', str(itraj)) for itraj
in range(self.number_of_trajectories())]
else:
raise TypeError("filename should be str or None")
self.logger.debug("write_to_csv, filenames=%s" % filenames)
# check files before starting to write
import errno
for f in filenames:
try:
st = os.stat(f)
raise OSError(errno.EEXIST)
except OSError as e:
if e.errno == errno.EEXIST:
if overwrite:
continue
elif e.errno == errno.ENOENT:
continue
raise
f = None
from pyemma._base.progress import ProgressReporter
pg = ProgressReporter()
it = self.iterator(stride, chunk=chunksize, return_trajindex=False)
pg.register(it.n_chunks, "saving to csv")
with it, pg.context():
oldtraj = -1
for X in it:
if oldtraj != it.current_trajindex:
if f is not None:
f.close()
fn = filenames[it.current_trajindex]
self.logger.debug("opening file %s for writing csv." % fn)
f = open(fn, 'wb')
oldtraj = it.current_trajindex
np.savetxt(f, X, **kw)
f.flush()
pg.update(1, 0)
if f is not None:
f.close() | write all data to csv with numpy.savetxt
Parameters
----------
filename : str, optional
filename string, which may contain placeholders {itraj} and {stride}:
* itraj will be replaced by trajetory index
* stride is stride argument of this method
If filename is not given, it is being tried to obtain the filenames
from the data source of this iterator.
extension : str, optional, default='.dat'
filename extension of created files
overwrite : bool, optional, default=False
shall existing files be overwritten? If a file exists, this method will raise.
stride : int
omit every n'th frame
chunksize: int, default=None
how many frames to process at once
kw : dict, optional
named arguments passed into numpy.savetxt (header, seperator etc.)
Example
-------
Assume you want to save features calculated by some FeatureReader to ASCII:
>>> import numpy as np, pyemma
>>> import os
>>> from pyemma.util.files import TemporaryDirectory
>>> from pyemma.util.contexts import settings
>>> data = [np.random.random((10,3))] * 3
>>> reader = pyemma.coordinates.source(data)
>>> filename = "distances_{itraj}.dat"
>>> with TemporaryDirectory() as td, settings(show_progress_bars=False):
... out = os.path.join(td, filename)
... reader.write_to_csv(out, header='', delimiter=';')
... print(sorted(os.listdir(td)))
['distances_0.dat', 'distances_1.dat', 'distances_2.dat'] | Below is the instruction that describes the task:
### Input:
write all data to csv with numpy.savetxt
Parameters
----------
filename : str, optional
filename string, which may contain placeholders {itraj} and {stride}:
* itraj will be replaced by trajectory index
* stride is stride argument of this method
If filename is not given, the filenames are obtained
from the data source of this iterator.
extension : str, optional, default='.dat'
filename extension of created files
overwrite : bool, optional, default=False
shall existing files be overwritten? If False and a file exists, this method will raise.
stride : int
omit every n'th frame
chunksize: int, default=None
how many frames to process at once
kw : dict, optional
named arguments passed into numpy.savetxt (header, separator etc.)
Example
-------
Assume you want to save features calculated by some FeatureReader to ASCII:
>>> import numpy as np, pyemma
>>> import os
>>> from pyemma.util.files import TemporaryDirectory
>>> from pyemma.util.contexts import settings
>>> data = [np.random.random((10,3))] * 3
>>> reader = pyemma.coordinates.source(data)
>>> filename = "distances_{itraj}.dat"
>>> with TemporaryDirectory() as td, settings(show_progress_bars=False):
... out = os.path.join(td, filename)
... reader.write_to_csv(out, header='', delimiter=';')
... print(sorted(os.listdir(td)))
['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
### Response:
def write_to_csv(self, filename=None, extension='.dat', overwrite=False,
stride=1, chunksize=None, **kw):
""" write all data to csv with numpy.savetxt
Parameters
----------
filename : str, optional
filename string, which may contain placeholders {itraj} and {stride}:
* itraj will be replaced by trajectory index
* stride is stride argument of this method
If filename is not given, the filenames are obtained
from the data source of this iterator.
extension : str, optional, default='.dat'
filename extension of created files
overwrite : bool, optional, default=False
shall existing files be overwritten? If False and a file exists, this method will raise.
stride : int
omit every n'th frame
chunksize: int, default=None
how many frames to process at once
kw : dict, optional
named arguments passed into numpy.savetxt (header, separator etc.)
Example
-------
Assume you want to save features calculated by some FeatureReader to ASCII:
>>> import numpy as np, pyemma
>>> import os
>>> from pyemma.util.files import TemporaryDirectory
>>> from pyemma.util.contexts import settings
>>> data = [np.random.random((10,3))] * 3
>>> reader = pyemma.coordinates.source(data)
>>> filename = "distances_{itraj}.dat"
>>> with TemporaryDirectory() as td, settings(show_progress_bars=False):
... out = os.path.join(td, filename)
... reader.write_to_csv(out, header='', delimiter=';')
... print(sorted(os.listdir(td)))
['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
"""
import os
if not filename:
assert hasattr(self, 'filenames')
# raise RuntimeError("could not determine filenames")
filenames = []
for f in self.filenames:
base, _ = os.path.splitext(f)
filenames.append(base + extension)
elif isinstance(filename, str):
filename = filename.replace('{stride}', str(stride))
filenames = [filename.replace('{itraj}', str(itraj)) for itraj
in range(self.number_of_trajectories())]
else:
raise TypeError("filename should be str or None")
self.logger.debug("write_to_csv, filenames=%s" % filenames)
# check files before starting to write
import errno
for f in filenames:
try:
st = os.stat(f)
raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), f)
except OSError as e:
if e.errno == errno.EEXIST:
if overwrite:
continue
elif e.errno == errno.ENOENT:
continue
raise
f = None
from pyemma._base.progress import ProgressReporter
pg = ProgressReporter()
it = self.iterator(stride, chunk=chunksize, return_trajindex=False)
pg.register(it.n_chunks, "saving to csv")
with it, pg.context():
oldtraj = -1
for X in it:
if oldtraj != it.current_trajindex:
if f is not None:
f.close()
fn = filenames[it.current_trajindex]
self.logger.debug("opening file %s for writing csv." % fn)
f = open(fn, 'wb')
oldtraj = it.current_trajindex
np.savetxt(f, X, **kw)
f.flush()
pg.update(1, 0)
if f is not None:
f.close() |
def rfcformat(dt, localtime=False):
"""Return the RFC822-formatted representation of a datetime object.
:param datetime dt: The datetime.
:param bool localtime: If ``True``, return the date relative to the local
timezone instead of UTC, displaying the proper offset,
e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
"""
if not localtime:
return formatdate(timegm(dt.utctimetuple()))
else:
return local_rfcformat(dt) | Return the RFC822-formatted representation of a datetime object.
:param datetime dt: The datetime.
:param bool localtime: If ``True``, return the date relative to the local
timezone instead of UTC, displaying the proper offset,
e.g. "Sun, 10 Nov 2013 08:23:45 -0600" | Below is the the instruction that describes the task:
### Input:
Return the RFC822-formatted representation of a datetime object.
:param datetime dt: The datetime.
:param bool localtime: If ``True``, return the date relative to the local
timezone instead of UTC, displaying the proper offset,
e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
### Response:
def rfcformat(dt, localtime=False):
"""Return the RFC822-formatted representation of a datetime object.
:param datetime dt: The datetime.
:param bool localtime: If ``True``, return the date relative to the local
timezone instead of UTC, displaying the proper offset,
e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
"""
if not localtime:
return formatdate(timegm(dt.utctimetuple()))
else:
return local_rfcformat(dt) |
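A minimal standard-library sketch of the UTC branch above; the localtime branch needs the module's local_rfcformat helper, which is not shown here:
from calendar import timegm
from datetime import datetime
from email.utils import formatdate
dt = datetime(2013, 11, 10, 8, 23, 45)
# same expression as the non-localtime branch
print(formatdate(timegm(dt.utctimetuple())))  # 'Sun, 10 Nov 2013 08:23:45 -0000'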
def parse_hosts(hosts, ssh_port=None, ssh_config=None):
"""
Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.
The information about the host is taken in this order of priority:
- host:
- from the host (string) itself.
- user:
- from the host (string) itself.
- from the `paramiko.config.SSHConfig` object.
- the currently logged-in user.
- port:
- from the function argument `port`.
- from the `paramiko.config.SSHConfig` object.
- default SSH port: 22
:param hosts: list of hosts (string). Eg: ['server1', 'user1@server2']
:param ssh_config: a `paramiko.config.SSHConfig` object.
:return: a list of `msshcopyid.Host` objects.
"""
host_list = [] # list of Host objects
current_user = getpass.getuser()
for host in hosts:
# host_info = {'hostname': 'server1', 'hashknownhosts': 'no', 'user': 'user1'}
if ssh_config is not None:
host_info = ssh_config.lookup(host)
else:
host_info = {}
# hostname & user
if '@' in host:
user, hostname = host.split('@', 1)
else:
hostname = host
user = host_info.get('user', current_user)
# port
port = ssh_port or host_info.get('port', DEFAULT_SSH_PORT)
host_list.append(msshcopyid.Host(hostname=hostname, port=port, user=user))
return host_list | Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.
The information about the host is taken in this order of priority:
- host:
- from the host (string) itself.
- user:
- from the host (string) itself.
- from the `paramiko.config.SSHConfig` object.
- the currently logged-in user.
- port:
- from the function argument `port`.
- from the `paramiko.config.SSHConfig` object.
- default SSH port: 22
:param hosts: list of hosts (string). Eg: ['server1', 'user1@server2']
:param ssh_config: a `paramiko.config.SSHConfig` object.
:return: a list of `msshcopyid.Host` objects. | Below is the the instruction that describes the task:
### Input:
Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.
The information about the host is taken in this order of priority:
- host:
- from the host (string) itself.
- user:
- from the host (string) itself.
- from the `paramiko.config.SSHConfig` object.
- the currently logged-in user.
- port:
- from the function argument `port`.
- from the `paramiko.config.SSHConfig` object.
- default SSH port: 22
:param hosts: list of hosts (string). Eg: ['server1', 'user1@server2']
:param ssh_config: a `paramiko.config.SSHConfig` object.
:return: a list of `msshcopyid.Host` objects.
### Response:
def parse_hosts(hosts, ssh_port=None, ssh_config=None):
"""
Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.
The information about the host is taken in this order of priority:
- host:
- from the host (string) itself.
- user:
- from the host (string) itself.
- from the `paramiko.config.SSHConfig` object.
- the currently logged-in user.
- port:
- from the function argument `port`.
- from the `paramiko.config.SSHConfig` object.
- default SSH port: 22
:param hosts: list of hosts (string). Eg: ['server1', 'user1@server2']
:param ssh_config: a `paramiko.config.SSHConfig` object.
:return: a list of `msshcopyid.Host` objects.
"""
host_list = [] # list of Host objects
current_user = getpass.getuser()
for host in hosts:
# host_info = {'hostname': 'server1', 'hashknownhosts': 'no', 'user': 'user1'}
if ssh_config is not None:
host_info = ssh_config.lookup(host)
else:
host_info = {}
# hostname & user
if '@' in host:
user, hostname = host.split('@', 1)
else:
hostname = host
user = host_info.get('user', current_user)
# port
port = ssh_port or host_info.get('port', DEFAULT_SSH_PORT)
host_list.append(msshcopyid.Host(hostname=hostname, port=port, user=user))
return host_list |
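A self-contained sketch of the same 'user@host' splitting and fallback order; it skips the paramiko SSH-config lookup, and the Host namedtuple below is only a stand-in for msshcopyid.Host:
import getpass
from collections import namedtuple
Host = namedtuple('Host', 'hostname port user')  # stand-in, not the library class
def parse_hosts_sketch(hosts, ssh_port=None, default_port=22):
    current_user = getpass.getuser()
    parsed = []
    for host in hosts:
        if '@' in host:
            user, hostname = host.split('@', 1)
        else:
            user, hostname = current_user, host
        # an explicit port argument wins, otherwise the default applies
        parsed.append(Host(hostname=hostname, port=ssh_port or default_port, user=user))
    return parsed
print(parse_hosts_sketch(['server1', 'user1@server2']))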
def replace_pattern(
df,
column: str,
*,
pat: str,
repl: str,
new_column: str = None,
case: bool = True,
regex: bool = True
):
"""
Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex)
return df | Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true | Below is the the instruction that describes the task:
### Input:
Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true
### Response:
def replace_pattern(
df,
column: str,
*,
pat: str,
repl: str,
new_column: str = None,
case: bool = True,
regex: bool = True
):
"""
Replace occurrences of pattern/regex in `column` with some other string
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `pat` (*str*): character sequence or regular expression
- `repl` (*str*): replacement string
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
- `case` (*boolean*): if true, case sensitive.
- `regex` (*boolean*): default true
"""
new_column = new_column or column
df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex)
return df |
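A short usage sketch with a throwaway DataFrame, assuming a pandas version whose Series.str.replace accepts the case and regex keywords:
import pandas as pd
df = pd.DataFrame({'city': ['New York', 'New Orleans', 'Boston']})
df = replace_pattern(df, 'city', pat=r'^New\s+', repl='Old ', new_column='renamed')
print(df['renamed'].tolist())  # ['Old York', 'Old Orleans', 'Boston']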
def gen_configs_permutate(self,
injections_raw,
only_same_dipole_length=False,
ignore_crossed_dipoles=False,
silent=False):
""" Create measurement configurations out of a pool of current
injections. Use only the provided dipoles for potential dipole
selection. This means that we always have reciprocal measurements.
Remove quadpoles where electrodes are used both as current and voltage
dipoles.
Parameters
----------
injections_raw : Nx2 array
current injections
only_same_dipole_length : bool, optional
if True, only generate permutations for the same dipole length
ignore_crossed_dipoles : bool, optional
If True, potential dipoles will be ignored that lie between current
dipoles, e.g. 1-4 3-5. In this case it is possible to not have
full normal-reciprocal coverage.
silent: bool, optional
if True, do not print information on ignored configs (default:
False)
Returns
-------
configs : Nx4 array
quadrupoles generated out of the current injections
"""
injections = np.atleast_2d(injections_raw).astype(int)
N = injections.shape[0]
measurements = []
for injection in range(0, N):
dipole_length = np.abs(injections[injection][1] -
injections[injection][0])
# select all dipole EXCEPT for the injection dipole
for i in set(range(0, N)) - set([injection]):
test_dipole_length = np.abs(injections[i, :][1] -
injections[i, :][0])
if (only_same_dipole_length
and test_dipole_length != dipole_length):
continue
quadpole = np.array(
[injections[injection, :], injections[i, :]]).flatten()
if ignore_crossed_dipoles is True:
# check if we need to ignore this dipole
# Note: this could be wrong if electrode numbers are not
# ascending!
if (quadpole[2] > quadpole[0]
and quadpole[2] < quadpole[1]):
if not silent:
print('A - ignoring', quadpole)
elif (quadpole[3] > quadpole[0]
and quadpole[3] < quadpole[1]):
if not silent:
print('B - ignoring', quadpole)
else:
measurements.append(quadpole)
else:
# add every quadpole
measurements.append(quadpole)
# check and remove double use of electrodes
filtered = []
for quadpole in measurements:
if (not set(quadpole[0:2]).isdisjoint(set(quadpole[2:4]))):
if not silent:
print('Ignoring quadrupole because of ',
'repeated electrode use:', quadpole)
else:
filtered.append(quadpole)
self.add_to_configs(filtered)
return np.array(filtered) | Create measurement configurations out of a pool of current
injections. Use only the provided dipoles for potential dipole
selection. This means that we always have reciprocal measurements.
Remove quadpoles where electrodes are used both as current and voltage
dipoles.
Parameters
----------
injections_raw : Nx2 array
current injections
only_same_dipole_length : bool, optional
if True, only generate permutations for the same dipole length
ignore_crossed_dipoles : bool, optional
If True, potential dipoles will be ignored that lie between current
dipoles, e.g. 1-4 3-5. In this case it is possible to not have
full normal-reciprocal coverage.
silent: bool, optional
if True, do not print information on ignored configs (default:
False)
Returns
-------
configs : Nx4 array
quadrupoles generated out of the current injections | Below is the the instruction that describes the task:
### Input:
Create measurement configurations out of a pool of current
injections. Use only the provided dipoles for potential dipole
selection. This means that we always have reciprocal measurements.
Remove quadpoles where electrodes are used both as current and voltage
dipoles.
Parameters
----------
injections_raw : Nx2 array
current injections
only_same_dipole_length : bool, optional
if True, only generate permutations for the same dipole length
ignore_crossed_dipoles : bool, optional
If True, potential dipoles will be ignored that lie between current
dipoles, e.g. 1-4 3-5. In this case it is possible to not have
full normal-reciprocal coverage.
silent: bool, optional
if True, do not print information on ignored configs (default:
False)
Returns
-------
configs : Nx4 array
quadrupoles generated out of the current injections
### Response:
def gen_configs_permutate(self,
injections_raw,
only_same_dipole_length=False,
ignore_crossed_dipoles=False,
silent=False):
""" Create measurement configurations out of a pool of current
injections. Use only the provided dipoles for potential dipole
selection. This means that we always have reciprocal measurements.
Remove quadpoles where electrodes are used both as current and voltage
dipoles.
Parameters
----------
injections_raw : Nx2 array
current injections
only_same_dipole_length : bool, optional
if True, only generate permutations for the same dipole length
ignore_crossed_dipoles : bool, optional
If True, potential dipoles will be ignored that lie between current
dipoles, e.g. 1-4 3-5. In this case it is possible to not have
full normal-reciprocal coverage.
silent: bool, optional
if True, do not print information on ignored configs (default:
False)
Returns
-------
configs : Nx4 array
quadrupoles generated out of the current injections
"""
injections = np.atleast_2d(injections_raw).astype(int)
N = injections.shape[0]
measurements = []
for injection in range(0, N):
dipole_length = np.abs(injections[injection][1] -
injections[injection][0])
# select all dipole EXCEPT for the injection dipole
for i in set(range(0, N)) - set([injection]):
test_dipole_length = np.abs(injections[i, :][1] -
injections[i, :][0])
if (only_same_dipole_length
and test_dipole_length != dipole_length):
continue
quadpole = np.array(
[injections[injection, :], injections[i, :]]).flatten()
if ignore_crossed_dipoles is True:
# check if we need to ignore this dipole
# Note: this could be wrong if electrode numbers are not
# ascending!
if (quadpole[2] > quadpole[0]
and quadpole[2] < quadpole[1]):
if not silent:
print('A - ignoring', quadpole)
elif (quadpole[3] > quadpole[0]
and quadpole[3] < quadpole[1]):
if not silent:
print('B - ignoring', quadpole)
else:
measurements.append(quadpole)
else:
# add every quadpole
measurements.append(quadpole)
# check and remove double use of electrodes
filtered = []
for quadpole in measurements:
if (not set(quadpole[0:2]).isdisjoint(set(quadpole[2:4]))):
if not silent:
print('Ignoring quadrupole because of ',
'repeated electrode use:', quadpole)
else:
filtered.append(quadpole)
self.add_to_configs(filtered)
return np.array(filtered) |
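For intuition, a standalone numpy sketch of the core pairing step: each injection dipole is combined with every other dipole as a potential dipole, and quadrupoles that reuse an electrode are dropped; the dipole-length and crossed-dipole filters of the full method are omitted:
import numpy as np
injections = np.array([[1, 2], [3, 4], [5, 6]])
quadpoles = []
for i, ab in enumerate(injections):
    for j, mn in enumerate(injections):
        if i == j:
            continue
        if set(ab) & set(mn):  # skip repeated electrode use
            continue
        quadpoles.append(np.r_[ab, mn])  # row layout: [A, B, M, N]
print(np.array(quadpoles))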
def p_unrelate_statement_using_1(self, p):
'''statement : UNRELATE instance_name FROM instance_name ACROSS rel_id USING instance_name'''
p[0] = UnrelateUsingNode(from_variable_name=p[2],
to_variable_name=p[4],
rel_id=p[6],
phrase=None,
using_variable_name=p[8]) | statement : UNRELATE instance_name FROM instance_name ACROSS rel_id USING instance_name | Below is the the instruction that describes the task:
### Input:
statement : UNRELATE instance_name FROM instance_name ACROSS rel_id USING instance_name
### Response:
def p_unrelate_statement_using_1(self, p):
'''statement : UNRELATE instance_name FROM instance_name ACROSS rel_id USING instance_name'''
p[0] = UnrelateUsingNode(from_variable_name=p[2],
to_variable_name=p[4],
rel_id=p[6],
phrase=None,
using_variable_name=p[8]) |
async def update(self, db=None, data=None):
'''
Update the entire document by replacing its content with new data, retaining its primary key
'''
db = db or self.db
if data: # update model explicitly with a new data structure
# merge the current model's data with the new data
self.import_data(data)
# prepare data for database update
data = self.prepare_data()
# data = {x: ndata[x] for x in ndata if x in data or x == self.primary_key}
else:
data = self.export_data(native=True)
if self.primary_key not in data or data[self.primary_key] is None:
raise Exception('Missing object primary key')
query = {self.primary_key: self.pk}
for i in self.connection_retries():
try:
result = await db[self.get_collection_name()].find_one_and_replace(
filter=query,
replacement=data,
return_document=ReturnDocument.AFTER
)
if result:
updated_obj = self.create_model(result)
updated_obj._db = db
# emit post save
asyncio.ensure_future(post_save.send(
sender=self.__class__,
db=db,
instance=updated_obj,
created=False)
)
return updated_obj
return None
except ConnectionFailure as ex:
exceed = await self.check_reconnect_tries_and_wait(i, 'update')
if exceed:
raise ex | Update the entire document by replacing its content with new data, retaining its primary key | Below is the the instruction that describes the task:
### Input:
Update the entire document by replacing its content with new data, retaining its primary key
### Response:
async def update(self, db=None, data=None):
'''
Update the entire document by replacing its content with new data, retaining its primary key
'''
db = db or self.db
if data: # update model explicitly with a new data structure
# merge the current model's data with the new data
self.import_data(data)
# prepare data for database update
data = self.prepare_data()
# data = {x: ndata[x] for x in ndata if x in data or x == self.primary_key}
else:
data = self.export_data(native=True)
if self.primary_key not in data or data[self.primary_key] is None:
raise Exception('Missing object primary key')
query = {self.primary_key: self.pk}
for i in self.connection_retries():
try:
result = await db[self.get_collection_name()].find_one_and_replace(
filter=query,
replacement=data,
return_document=ReturnDocument.AFTER
)
if result:
updated_obj = self.create_model(result)
updated_obj._db = db
# emit post save
asyncio.ensure_future(post_save.send(
sender=self.__class__,
db=db,
instance=updated_obj,
created=False)
)
return updated_obj
return None
except ConnectionFailure as ex:
exceed = await self.check_reconnect_tries_and_wait(i, 'update')
if exceed:
raise ex |
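A rough sketch of the underlying find_one_and_replace call with the Motor async driver; the database, collection, and document names are made-up placeholders and a reachable MongoDB instance is assumed:
import asyncio
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import ReturnDocument
async def demo():
    collection = AsyncIOMotorClient()['demo_db']['items']  # placeholder names
    doc = await collection.find_one_and_replace(
        filter={'_id': 1},
        replacement={'_id': 1, 'name': 'updated'},
        return_document=ReturnDocument.AFTER,
    )
    print(doc)  # the replaced document, or None if nothing matched
asyncio.run(demo())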
def _parse_path(self, path):
"""Parses paths in the following formats:
url: username/project/runs/run_id
path: username/project/run_id
docker: username/project:run_id
username is optional and will fall back to the currently logged-in user.
"""
run = self.settings['run']
project = self.settings['project']
username = self.settings['username']
parts = path.replace("/runs/", "/").split("/")
if ":" in parts[-1]:
run = parts[-1].split(":")[-1]
parts[-1] = parts[-1].split(":")[0]
elif parts[-1]:
run = parts[-1]
if len(parts) > 1:
project = parts[1]
if username and run == project:
project = parts[0]
else:
username = parts[0]
else:
project = parts[0]
return (username, project, run) | Parses paths in the following formats:
url: username/project/runs/run_id
path: username/project/run_id
docker: username/project:run_id
username is optional and will fall back to the currently logged-in user. | Below is the the instruction that describes the task:
### Input:
Parses paths in the following formats:
url: username/project/runs/run_id
path: username/project/run_id
docker: username/project:run_id
username is optional and will fall back to the currently logged-in user.
### Response:
def _parse_path(self, path):
"""Parses paths in the following formats:
url: username/project/runs/run_id
path: username/project/run_id
docker: username/project:run_id
username is optional and will fall back to the currently logged-in user.
"""
run = self.settings['run']
project = self.settings['project']
username = self.settings['username']
parts = path.replace("/runs/", "/").split("/")
if ":" in parts[-1]:
run = parts[-1].split(":")[-1]
parts[-1] = parts[-1].split(":")[0]
elif parts[-1]:
run = parts[-1]
if len(parts) > 1:
project = parts[1]
if username and run == project:
project = parts[0]
else:
username = parts[0]
else:
project = parts[0]
return (username, project, run) |
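The precedence rules can be tried outside the class with fixed defaults standing in for self.settings; a self-contained sketch:
def parse_path_sketch(path, default_user='me', default_project='proj', default_run='latest'):
    username, project, run = default_user, default_project, default_run
    parts = path.replace("/runs/", "/").split("/")
    if ":" in parts[-1]:
        run = parts[-1].split(":")[-1]
        parts[-1] = parts[-1].split(":")[0]
    elif parts[-1]:
        run = parts[-1]
    if len(parts) > 1:
        project = parts[1]
        if default_user and run == project:
            project = parts[0]
        else:
            username = parts[0]
    else:
        project = parts[0]
    return username, project, run
print(parse_path_sketch("alice/myproj/runs/abc123"))  # ('alice', 'myproj', 'abc123')
print(parse_path_sketch("alice/myproj:abc123"))       # ('alice', 'myproj', 'abc123')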
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event) | Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content | Below is the the instruction that describes the task:
### Input:
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
### Response:
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event) |
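For reference, a hedged sketch of the direct boto3 call this wraps; bucket, key, and column names are placeholders and valid AWS credentials are assumed:
import boto3
s3 = boto3.client('s3')
response = s3.select_object_content(
    Bucket='my-bucket',                 # placeholder
    Key='data/prices.csv',              # placeholder
    Expression="SELECT s.symbol FROM S3Object s",
    ExpressionType='SQL',
    InputSerialization={'CSV': {'FileHeaderInfo': 'USE'}},
    OutputSerialization={'CSV': {}},
)
print(''.join(event['Records']['Payload'].decode('utf-8')
              for event in response['Payload'] if 'Records' in event))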
def parse_JSON(self, JSON_string):
"""
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station* instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
id = d.get('ID', None) or d.get('id', None)
external_id = d.get('external_id', None)
lon = d.get('longitude', None)
lat = d.get('latitude', None)
alt = d.get('altitude', None)
except KeyError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
name = d.get('name', None)
rank = d.get('rank', None)
created_at = d.get('created_at', None)
updated_at = d.get('updated_at', None)
return Station(id, created_at, updated_at, external_id, name, lon, lat,
alt, rank) | Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station* instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result | Below is the the instruction that describes the task:
### Input:
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station* instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
### Response:
def parse_JSON(self, JSON_string):
"""
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.
:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station* instance or ``None``
if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result
"""
if JSON_string is None:
raise parse_response_error.ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
try:
id = d.get('ID', None) or d.get('id', None)
external_id = d.get('external_id', None)
lon = d.get('longitude', None)
lat = d.get('latitude', None)
alt = d.get('altitude', None)
except KeyError as e:
raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
name = d.get('name', None)
rank = d.get('rank', None)
created_at = d.get('created_at', None)
updated_at = d.get('updated_at', None)
return Station(id, created_at, updated_at, external_id, name, lon, lat,
alt, rank) |
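The tolerant .get-based extraction can be exercised on a throwaway payload without the surrounding pyowm classes; the field values below are sample data, not a real station record:
import json
payload = ('{"ID": "abc123", "external_id": "SF_TEST001", "name": "Test Station", '
           '"latitude": 37.76, "longitude": -122.43, "altitude": 150, "rank": 10}')
d = json.loads(payload)
fields = (d.get('ID') or d.get('id'), d.get('external_id'), d.get('longitude'),
          d.get('latitude'), d.get('altitude'), d.get('name'), d.get('rank'))
print(fields)  # ('abc123', 'SF_TEST001', -122.43, 37.76, 150, 'Test Station', 10)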
def _create_folder(local_folder, parent_folder_id):
"""
Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long
"""
new_folder = session.communicator.create_folder(
session.token, os.path.basename(local_folder), parent_folder_id)
return new_folder['folder_id'] | Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long | Below is the the instruction that describes the task:
### Input:
Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long
### Response:
def _create_folder(local_folder, parent_folder_id):
"""
Function for creating a remote folder and returning the id. This should be
a building block for user-level functions.
:param local_folder: full path to a local folder
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:returns: id of the remote folder that was created
:rtype: int | long
"""
new_folder = session.communicator.create_folder(
session.token, os.path.basename(local_folder), parent_folder_id)
return new_folder['folder_id'] |
def get_signals(signal_array, frame, root_or_cache, ns, multiplex_id, float_factory):
# type: (typing.Sequence[_Element], canmatrix.Frame, _DocRoot, str, _MultiplexId, typing.Callable) -> None
"""Add signals from xml to the Frame."""
global signal_rxs
group_id = 1
if signal_array is None: # Empty signalarray - nothing to do
return
for signal in signal_array:
compu_method = None
motorola = get_child(signal, "PACKING-BYTE-ORDER", root_or_cache, ns)
start_bit = get_child(signal, "START-POSITION", root_or_cache, ns)
isignal = get_child(signal, "SIGNAL", root_or_cache, ns)
if isignal is None:
isignal = get_child(signal, "I-SIGNAL", root_or_cache, ns)
if isignal is None:
isignal = get_child(signal, "I-SIGNAL-GROUP", root_or_cache, ns)
if isignal is not None:
logger.debug("get_signals: found I-SIGNAL-GROUP ")
isignal_array = find_children_by_path(isignal, "I-SIGNAL", root_or_cache, ns)
get_sys_signals(isignal, isignal_array, frame, group_id, ns)
group_id = group_id + 1
continue
if isignal is None:
logger.debug(
'Frame %s, no isignal for %s found',
frame.name, get_child(signal, "SHORT-NAME", root_or_cache, ns).text)
base_type = get_child(isignal, "BASE-TYPE", root_or_cache, ns)
signal_name = None # type: typing.Optional[str]
signal_name_elem = get_child(isignal, "LONG-NAME", root_or_cache, ns)
if signal_name_elem is not None:
signal_name_elem = get_child(signal_name_elem, "L-4", root_or_cache, ns)
if signal_name_elem is not None:
signal_name = signal_name_elem.text
system_signal = get_child(isignal, "SYSTEM-SIGNAL", root_or_cache, ns)
if system_signal is None:
logger.debug('Frame %s, signal %s has no system-signal', frame.name, isignal.tag)
if "SYSTEM-SIGNAL-GROUP" in system_signal.tag:
system_signals = find_children_by_path(system_signal, "SYSTEM-SIGNAL-REFS/SYSTEM-SIGNAL", root_or_cache, ns)
get_sys_signals(system_signal, system_signals, frame, group_id, ns)
group_id = group_id + 1
continue
length = get_child(isignal, "LENGTH", root_or_cache, ns)
if length is None:
length = get_child(system_signal, "LENGTH", root_or_cache, ns)
name = get_child(system_signal, "SHORT-NAME", root_or_cache, ns)
unit_element = get_child(isignal, "UNIT", root_or_cache, ns)
display_name = get_child(unit_element, "DISPLAY-NAME", root_or_cache, ns)
if display_name is not None:
signal_unit = display_name.text
else:
signal_unit = ""
signal_min = None # type: canmatrix.types.OptionalPhysicalValue
signal_max = None # type: canmatrix.types.OptionalPhysicalValue
receiver = [] # type: typing.List[str]
signal_description = get_element_desc(system_signal, root_or_cache, ns)
datatype = get_child(system_signal, "DATA-TYPE", root_or_cache, ns)
if datatype is None: # AR4?
data_constr = None
compu_method = None
base_type = None
for test_signal in [isignal, system_signal]:
if data_constr is None:
data_constr = get_child(test_signal, "DATA-CONSTR", root_or_cache, ns)
if compu_method is None:
compu_method = get_child(test_signal, "COMPU-METHOD", root_or_cache, ns)
if base_type is None:
base_type = get_child(test_signal, "BASE-TYPE", root_or_cache, ns)
lower = get_child(data_constr, "LOWER-LIMIT", root_or_cache, ns)
upper = get_child(data_constr, "UPPER-LIMIT", root_or_cache, ns)
encoding = None # TODO - find encoding in AR4
else:
lower = get_child(datatype, "LOWER-LIMIT", root_or_cache, ns)
upper = get_child(datatype, "UPPER-LIMIT", root_or_cache, ns)
encoding = get_child(datatype, "ENCODING", root_or_cache, ns)
if encoding is not None and (encoding.text == "SINGLE" or encoding.text == "DOUBLE"):
is_float = True
else:
is_float = False
if lower is not None and upper is not None:
signal_min = float_factory(lower.text)
signal_max = float_factory(upper.text)
datdefprops = get_child(datatype, "SW-DATA-DEF-PROPS", root_or_cache, ns)
if compu_method is None:
compu_method = get_child(datdefprops, "COMPU-METHOD", root_or_cache, ns)
if compu_method is None: # AR4
compu_method = get_child(isignal, "COMPU-METHOD", root_or_cache, ns)
base_type = get_child(isignal, "BASE-TYPE", root_or_cache, ns)
encoding = get_child(base_type, "BASE-TYPE-ENCODING", root_or_cache, ns)
if encoding is not None and encoding.text == "IEEE754":
is_float = True
if compu_method is None:
logger.debug('No Compmethod found!! - try alternate scheme 1.')
networkrep = get_child(isignal, "NETWORK-REPRESENTATION-PROPS", root_or_cache, ns)
data_def_props_var = get_child(networkrep, "SW-DATA-DEF-PROPS-VARIANTS", root_or_cache, ns)
data_def_props_cond = get_child(data_def_props_var, "SW-DATA-DEF-PROPS-CONDITIONAL", root_or_cache, ns)
if data_def_props_cond is not None:
try:
compu_method = get_child(data_def_props_cond, "COMPU-METHOD", root_or_cache, ns)
except:
logger.debug('No valid compu method found for this - check ARXML file!!')
compu_method = None
#####################################################################################################
# no compu-method found: fuzzy search in systemsignal:
#####################################################################################################
if compu_method is None:
logger.debug('No Compmethod found!! - fuzzy search in syssignal.')
compu_method = get_child(system_signal, "COMPU-METHOD", root_or_cache, ns)
# decode compuMethod:
(values, factor, offset, unit_elem, const) = decode_compu_method(compu_method, root_or_cache, ns, float_factory)
if signal_min is not None:
signal_min *= factor
signal_min += offset
if signal_max is not None:
signal_max *= factor
signal_max += offset
if base_type is None:
base_type = get_child(datdefprops, "BASE-TYPE", root_or_cache, ns)
if base_type is not None:
type_name = get_element_name(base_type, ns)
if type_name[0] == 'u':
is_signed = False # unsigned
else:
is_signed = True # signed
else:
is_signed = True # signed
if unit_elem is not None:
longname = get_child(unit_elem, "LONG-NAME", root_or_cache, ns)
#####################################################################################################
# Modification to support obtaining the Signals Unit by DISPLAY-NAME. 07June16
#####################################################################################################
display_name = None
try:
display_name = get_child(unit_elem, "DISPLAY-NAME", root_or_cache, ns)
except:
logger.debug('No Unit Display name found!! - using long name')
if display_name is not None:
signal_unit = display_name.text
else:
l4 = get_child(longname, "L-4", root_or_cache, ns)
if l4 is not None:
signal_unit = l4.text
init_list = find_children_by_path(system_signal, "INIT-VALUE/VALUE", root_or_cache, ns)
if not init_list:
init_list = find_children_by_path(isignal, "INIT-VALUE/NUMERICAL-VALUE-SPECIFICATION/VALUE", root_or_cache, ns) # #AR4.2
if init_list:
initvalue = init_list[0]
else:
initvalue = None
is_little_endian = False
if motorola is not None:
if motorola.text == 'MOST-SIGNIFICANT-BYTE-LAST':
is_little_endian = True
else:
logger.debug('no name byte order for signal' + name.text)
if name is None:
logger.debug('no name for signal given')
if start_bit is None:
logger.debug('no startBit for signal given')
if length is None:
logger.debug('no length for signal given')
if start_bit is not None:
new_signal = canmatrix.Signal(
name.text,
start_bit=int(start_bit.text),
size=int(length.text),
is_little_endian=is_little_endian,
is_signed=is_signed,
factor=factor,
offset=offset,
unit=signal_unit,
receivers=receiver,
multiplex=multiplex_id,
comment=signal_description,
is_float=is_float)
if signal_min is not None:
new_signal.min = signal_min
if signal_max is not None:
new_signal.max = signal_max
if new_signal.is_little_endian == 0:
# startbit of motorola coded signals are MSB in arxml
new_signal.set_startbit(int(start_bit.text), bitNumbering=1)
# save signal, to determine receiver-ECUs for this signal later
signal_rxs[system_signal] = new_signal
if base_type is not None:
temp = get_child(base_type, "SHORT-NAME", root_or_cache, ns)
if temp is not None and "boolean" == temp.text:
new_signal.add_values(1, "TRUE")
new_signal.add_values(0, "FALSE")
if initvalue is not None and initvalue.text is not None:
initvalue.text = canmatrix.utils.guess_value(initvalue.text)
new_signal._initValue = float_factory(initvalue.text)
new_signal.add_attribute("GenSigStartValue", str(new_signal._initValue))
else:
new_signal._initValue = 0
for key, value in list(values.items()):
new_signal.add_values(key, value)
if signal_name is not None:
new_signal.add_attribute("LongName", signal_name)
frame.add_signal(new_signal) | Add signals from xml to the Frame. | Below is the the instruction that describes the task:
### Input:
Add signals from xml to the Frame.
### Response:
def get_signals(signal_array, frame, root_or_cache, ns, multiplex_id, float_factory):
# type: (typing.Sequence[_Element], canmatrix.Frame, _DocRoot, str, _MultiplexId, typing.Callable) -> None
"""Add signals from xml to the Frame."""
global signal_rxs
group_id = 1
if signal_array is None: # Empty signalarray - nothing to do
return
for signal in signal_array:
compu_method = None
motorola = get_child(signal, "PACKING-BYTE-ORDER", root_or_cache, ns)
start_bit = get_child(signal, "START-POSITION", root_or_cache, ns)
isignal = get_child(signal, "SIGNAL", root_or_cache, ns)
if isignal is None:
isignal = get_child(signal, "I-SIGNAL", root_or_cache, ns)
if isignal is None:
isignal = get_child(signal, "I-SIGNAL-GROUP", root_or_cache, ns)
if isignal is not None:
logger.debug("get_signals: found I-SIGNAL-GROUP ")
isignal_array = find_children_by_path(isignal, "I-SIGNAL", root_or_cache, ns)
get_sys_signals(isignal, isignal_array, frame, group_id, ns)
group_id = group_id + 1
continue
if isignal is None:
logger.debug(
'Frame %s, no isignal for %s found',
frame.name, get_child(signal, "SHORT-NAME", root_or_cache, ns).text)
base_type = get_child(isignal, "BASE-TYPE", root_or_cache, ns)
signal_name = None # type: typing.Optional[str]
signal_name_elem = get_child(isignal, "LONG-NAME", root_or_cache, ns)
if signal_name_elem is not None:
signal_name_elem = get_child(signal_name_elem, "L-4", root_or_cache, ns)
if signal_name_elem is not None:
signal_name = signal_name_elem.text
system_signal = get_child(isignal, "SYSTEM-SIGNAL", root_or_cache, ns)
if system_signal is None:
logger.debug('Frame %s, signal %s has no system-signal', frame.name, isignal.tag)
if "SYSTEM-SIGNAL-GROUP" in system_signal.tag:
system_signals = find_children_by_path(system_signal, "SYSTEM-SIGNAL-REFS/SYSTEM-SIGNAL", root_or_cache, ns)
get_sys_signals(system_signal, system_signals, frame, group_id, ns)
group_id = group_id + 1
continue
length = get_child(isignal, "LENGTH", root_or_cache, ns)
if length is None:
length = get_child(system_signal, "LENGTH", root_or_cache, ns)
name = get_child(system_signal, "SHORT-NAME", root_or_cache, ns)
unit_element = get_child(isignal, "UNIT", root_or_cache, ns)
display_name = get_child(unit_element, "DISPLAY-NAME", root_or_cache, ns)
if display_name is not None:
signal_unit = display_name.text
else:
signal_unit = ""
signal_min = None # type: canmatrix.types.OptionalPhysicalValue
signal_max = None # type: canmatrix.types.OptionalPhysicalValue
receiver = [] # type: typing.List[str]
signal_description = get_element_desc(system_signal, root_or_cache, ns)
datatype = get_child(system_signal, "DATA-TYPE", root_or_cache, ns)
if datatype is None: # AR4?
data_constr = None
compu_method = None
base_type = None
for test_signal in [isignal, system_signal]:
if data_constr is None:
data_constr = get_child(test_signal, "DATA-CONSTR", root_or_cache, ns)
if compu_method is None:
compu_method = get_child(test_signal, "COMPU-METHOD", root_or_cache, ns)
if base_type is None:
base_type = get_child(test_signal, "BASE-TYPE", root_or_cache, ns)
lower = get_child(data_constr, "LOWER-LIMIT", root_or_cache, ns)
upper = get_child(data_constr, "UPPER-LIMIT", root_or_cache, ns)
encoding = None # TODO - find encoding in AR4
else:
lower = get_child(datatype, "LOWER-LIMIT", root_or_cache, ns)
upper = get_child(datatype, "UPPER-LIMIT", root_or_cache, ns)
encoding = get_child(datatype, "ENCODING", root_or_cache, ns)
if encoding is not None and (encoding.text == "SINGLE" or encoding.text == "DOUBLE"):
is_float = True
else:
is_float = False
if lower is not None and upper is not None:
signal_min = float_factory(lower.text)
signal_max = float_factory(upper.text)
datdefprops = get_child(datatype, "SW-DATA-DEF-PROPS", root_or_cache, ns)
if compu_method is None:
compu_method = get_child(datdefprops, "COMPU-METHOD", root_or_cache, ns)
if compu_method is None: # AR4
compu_method = get_child(isignal, "COMPU-METHOD", root_or_cache, ns)
base_type = get_child(isignal, "BASE-TYPE", root_or_cache, ns)
encoding = get_child(base_type, "BASE-TYPE-ENCODING", root_or_cache, ns)
if encoding is not None and encoding.text == "IEEE754":
is_float = True
if compu_method is None:
logger.debug('No Compmethod found!! - try alternate scheme 1.')
networkrep = get_child(isignal, "NETWORK-REPRESENTATION-PROPS", root_or_cache, ns)
data_def_props_var = get_child(networkrep, "SW-DATA-DEF-PROPS-VARIANTS", root_or_cache, ns)
data_def_props_cond = get_child(data_def_props_var, "SW-DATA-DEF-PROPS-CONDITIONAL", root_or_cache, ns)
if data_def_props_cond is not None:
try:
compu_method = get_child(data_def_props_cond, "COMPU-METHOD", root_or_cache, ns)
except:
logger.debug('No valid compu method found for this - check ARXML file!!')
compu_method = None
#####################################################################################################
# no compu-method found: fuzzy search in systemsignal:
#####################################################################################################
if compu_method is None:
logger.debug('No Compmethod found!! - fuzzy search in syssignal.')
compu_method = get_child(system_signal, "COMPU-METHOD", root_or_cache, ns)
# decode compuMethod:
(values, factor, offset, unit_elem, const) = decode_compu_method(compu_method, root_or_cache, ns, float_factory)
if signal_min is not None:
signal_min *= factor
signal_min += offset
if signal_max is not None:
signal_max *= factor
signal_max += offset
if base_type is None:
base_type = get_child(datdefprops, "BASE-TYPE", root_or_cache, ns)
if base_type is not None:
type_name = get_element_name(base_type, ns)
if type_name[0] == 'u':
is_signed = False # unsigned
else:
is_signed = True # signed
else:
is_signed = True # signed
if unit_elem is not None:
longname = get_child(unit_elem, "LONG-NAME", root_or_cache, ns)
#####################################################################################################
# Modification to support obtaining the Signals Unit by DISPLAY-NAME. 07June16
#####################################################################################################
display_name = None
try:
display_name = get_child(unit_elem, "DISPLAY-NAME", root_or_cache, ns)
except:
logger.debug('No Unit Display name found!! - using long name')
if display_name is not None:
signal_unit = display_name.text
else:
l4 = get_child(longname, "L-4", root_or_cache, ns)
if l4 is not None:
signal_unit = l4.text
init_list = find_children_by_path(system_signal, "INIT-VALUE/VALUE", root_or_cache, ns)
if not init_list:
init_list = find_children_by_path(isignal, "INIT-VALUE/NUMERICAL-VALUE-SPECIFICATION/VALUE", root_or_cache, ns) # #AR4.2
if init_list:
initvalue = init_list[0]
else:
initvalue = None
is_little_endian = False
if motorola is not None:
if motorola.text == 'MOST-SIGNIFICANT-BYTE-LAST':
is_little_endian = True
else:
logger.debug('no name byte order for signal' + name.text)
if name is None:
logger.debug('no name for signal given')
if start_bit is None:
logger.debug('no startBit for signal given')
if length is None:
logger.debug('no length for signal given')
if start_bit is not None:
new_signal = canmatrix.Signal(
name.text,
start_bit=int(start_bit.text),
size=int(length.text),
is_little_endian=is_little_endian,
is_signed=is_signed,
factor=factor,
offset=offset,
unit=signal_unit,
receivers=receiver,
multiplex=multiplex_id,
comment=signal_description,
is_float=is_float)
if signal_min is not None:
new_signal.min = signal_min
if signal_max is not None:
new_signal.max = signal_max
if new_signal.is_little_endian == 0:
# startbit of motorola coded signals are MSB in arxml
new_signal.set_startbit(int(start_bit.text), bitNumbering=1)
# save signal, to determine receiver-ECUs for this signal later
signal_rxs[system_signal] = new_signal
if base_type is not None:
temp = get_child(base_type, "SHORT-NAME", root_or_cache, ns)
if temp is not None and "boolean" == temp.text:
new_signal.add_values(1, "TRUE")
new_signal.add_values(0, "FALSE")
if initvalue is not None and initvalue.text is not None:
initvalue.text = canmatrix.utils.guess_value(initvalue.text)
new_signal._initValue = float_factory(initvalue.text)
new_signal.add_attribute("GenSigStartValue", str(new_signal._initValue))
else:
new_signal._initValue = 0
for key, value in list(values.items()):
new_signal.add_values(key, value)
if signal_name is not None:
new_signal.add_attribute("LongName", signal_name)
frame.add_signal(new_signal) |
def simxSetJointPosition(clientID, jointHandle, position, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetJointPosition(clientID, jointHandle, position, operationMode) | Please have a look at the function description/documentation in the V-REP user manual | Below is the the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxSetJointPosition(clientID, jointHandle, position, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetJointPosition(clientID, jointHandle, position, operationMode) |
def install_handler(self, event_type, handler, user_handle=None):
"""Installs handlers for event callbacks in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be installed by a client application.
:param user_handle: A value specified by an application that can be used for identifying handlers
uniquely for an event type.
:returns: user handle (a ctypes object)
"""
return self.visalib.install_visa_handler(self.session, event_type, handler, user_handle) | Installs handlers for event callbacks in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be installed by a client application.
:param user_handle: A value specified by an application that can be used for identifying handlers
uniquely for an event type.
:returns: user handle (a ctypes object) | Below is the the instruction that describes the task:
### Input:
Installs handlers for event callbacks in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be installed by a client application.
:param user_handle: A value specified by an application that can be used for identifying handlers
uniquely for an event type.
:returns: user handle (a ctypes object)
### Response:
def install_handler(self, event_type, handler, user_handle=None):
"""Installs handlers for event callbacks in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be installed by a client application.
:param user_handle: A value specified by an application that can be used for identifying handlers
uniquely for an event type.
:returns: user handle (a ctypes object)
"""
return self.visalib.install_visa_handler(self.session, event_type, handler, user_handle) |
def characterize_psf(self):
""" Get support size and drift polynomial for current set of params """
# there may be an issue with the support and characterization--
# it might be best to do the characterization with the same support
# as the calculated psf.
l,u = max(self.zrange[0], self.param_dict['psf-zslab']), self.zrange[1]
size_l, drift_l = self.measure_size_drift(l)
size_u, drift_u = self.measure_size_drift(u)
# must be odd for now or have a better system for getting the center
self.support = util.oddify(2*self.support_factor*size_u.astype('int'))
self.drift_poly = np.polyfit([l, u], [drift_l, drift_u], 1)
if self.cutoffval is not None:
psf, vec, size_l = self.psf_slice(l, size=51, zoffset=drift_l, getextent=True)
psf, vec, size_u = self.psf_slice(u, size=51, zoffset=drift_u, getextent=True)
ss = [np.abs(i).sum(axis=-1) for i in [size_l, size_u]]
self.support = util.oddify(util.amax(*ss)) | Get support size and drift polynomial for current set of params | Below is the the instruction that describes the task:
### Input:
Get support size and drift polynomial for current set of params
### Response:
def characterize_psf(self):
""" Get support size and drift polynomial for current set of params """
# there may be an issue with the support and characterization--
# it might be best to do the characterization with the same support
# as the calculated psf.
l,u = max(self.zrange[0], self.param_dict['psf-zslab']), self.zrange[1]
size_l, drift_l = self.measure_size_drift(l)
size_u, drift_u = self.measure_size_drift(u)
# must be odd for now or have a better system for getting the center
self.support = util.oddify(2*self.support_factor*size_u.astype('int'))
self.drift_poly = np.polyfit([l, u], [drift_l, drift_u], 1)
if self.cutoffval is not None:
psf, vec, size_l = self.psf_slice(l, size=51, zoffset=drift_l, getextent=True)
psf, vec, size_u = self.psf_slice(u, size=51, zoffset=drift_u, getextent=True)
ss = [np.abs(i).sum(axis=-1) for i in [size_l, size_u]]
self.support = util.oddify(util.amax(*ss)) |
def set_fraction(self, value):
"""Set the meter indicator. Value should be between 0 and 1."""
if value < 0:
value *= -1
value = min(value, 1)
if self.horizontal:
width = int(self.width * value)
height = self.height
else:
width = self.width
height = int(self.height * value)
self.canvas.coords(self.meter, self.xpos, self.ypos,
self.xpos + width, self.ypos + height) | Set the meter indicator. Value should be between 0 and 1. | Below is the the instruction that describes the task:
### Input:
Set the meter indicator. Value should be between 0 and 1.
### Response:
def set_fraction(self, value):
"""Set the meter indicator. Value should be between 0 and 1."""
if value < 0:
value *= -1
value = min(value, 1)
if self.horizontal:
width = int(self.width * value)
height = self.height
else:
width = self.width
height = int(self.height * value)
self.canvas.coords(self.meter, self.xpos, self.ypos,
self.xpos + width, self.ypos + height) |
def write_screen(self, font, color, screen_pos, text, align="left",
valign="top"):
"""Write to the screen in font.size relative coordinates."""
pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize()
text_surf = font.render(str(text), True, color)
rect = text_surf.get_rect()
if pos.x >= 0:
setattr(rect, align, pos.x)
else:
setattr(rect, align, self.surf.get_width() + pos.x)
if pos.y >= 0:
setattr(rect, valign, pos.y)
else:
setattr(rect, valign, self.surf.get_height() + pos.y)
self.surf.blit(text_surf, rect) | Write to the screen in font.size relative coordinates. | Below is the the instruction that describes the task:
### Input:
Write to the screen in font.size relative coordinates.
### Response:
def write_screen(self, font, color, screen_pos, text, align="left",
valign="top"):
"""Write to the screen in font.size relative coordinates."""
pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize()
text_surf = font.render(str(text), True, color)
rect = text_surf.get_rect()
if pos.x >= 0:
setattr(rect, align, pos.x)
else:
setattr(rect, align, self.surf.get_width() + pos.x)
if pos.y >= 0:
setattr(rect, valign, pos.y)
else:
setattr(rect, valign, self.surf.get_height() + pos.y)
self.surf.blit(text_surf, rect) |
def get_datas(callback, macs=[], run_flag=RunFlag(), bt_device=''):
"""
Get data for all ruuvitag sensors or sensors in the MAC's list.
Args:
callback (func): callback function to be called when new data is received
macs (list): MAC addresses
run_flag (object): RunFlag object. Function executes while run_flag.running
bt_device (string): Bluetooth device id
"""
log.info('Get latest data for sensors. Stop with Ctrl+C.')
log.info('MACs: %s', macs)
for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, None, run_flag, bt_device):
callback(new_data) | Get data for all ruuvitag sensors or sensors in the MAC's list.
Args:
callback (func): callback function to be called when new data is received
macs (list): MAC addresses
run_flag (object): RunFlag object. Function executes while run_flag.running
bt_device (string): Bluetooth device id | Below is the the instruction that describes the task:
### Input:
Get data for all ruuvitag sensors or sensors in the MAC's list.
Args:
callback (func): callback function to be called when new data is received
macs (list): MAC addresses
run_flag (object): RunFlag object. Function executes while run_flag.running
bt_device (string): Bluetooth device id
### Response:
def get_datas(callback, macs=[], run_flag=RunFlag(), bt_device=''):
"""
Get data for all ruuvitag sensors or sensors in the MAC's list.
Args:
callback (func): callback function to be called when new data is received
macs (list): MAC addresses
run_flag (object): RunFlag object. Function executes while run_flag.running
bt_device (string): Bluetooth device id
"""
log.info('Get latest data for sensors. Stop with Ctrl+C.')
log.info('MACs: %s', macs)
for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, None, run_flag, bt_device):
callback(new_data) |
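A minimal usage sketch for the record above (the MAC address is hypothetical; this assumes the ruuvitag_sensor package's usual import path and that the callback simply receives each new_data object the scanner yields):

from ruuvitag_sensor.ruuvi import RuuviTagSensor

def handle_data(new_data):
    # print whatever the scanner delivered for this advertisement
    print(new_data)

# Listen to a single (hypothetical) tag; stop with Ctrl+C
RuuviTagSensor.get_datas(handle_data, macs=['AA:BB:CC:DD:EE:FF'])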
def is_invertible(self,X):
"""checks if Z is invertible"""
if len(X.shape) == 2:
return X.shape[0] == X.shape[1] and np.linalg.matrix_rank(X) == X.shape[0]
else:
return False | checks if X is invertible | Below is the the instruction that describes the task:
### Input:
checks if X is invertible
### Response:
def is_invertible(self,X):
"""checks if Z is invertible"""
if len(X.shape) == 2:
return X.shape[0] == X.shape[1] and np.linalg.matrix_rank(X) == X.shape[0]
else:
return False |
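For illustration, the same check can be exercised standalone with NumPy (a sketch; self is unused by the logic, so only the array matters):

import numpy as np

def is_invertible(X):
    # square and full rank -> invertible
    return len(X.shape) == 2 and X.shape[0] == X.shape[1] and np.linalg.matrix_rank(X) == X.shape[0]

print(is_invertible(np.eye(3)))        # True
print(is_invertible(np.ones((3, 3))))  # False: rank 1
print(is_invertible(np.ones((2, 3))))  # False: not square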
def handle_part(self, params):
"""
Handle a client parting from channel(s).
"""
for pchannel in params.split(','):
if pchannel.strip() in self.server.channels:
# Send message to all clients in all channels user is in, and
# remove the user from the channels.
channel = self.server.channels.get(pchannel.strip())
response = ':%s PART :%s' % (self.client_ident(), pchannel)
if channel:
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.channels.pop(pchannel)
else:
_vars = self.server.servername, pchannel, pchannel
response = ':%s 403 %s :%s' % _vars
self.send_queue.append(response) | Handle a client parting from channel(s). | Below is the the instruction that describes the task:
### Input:
Handle a client parting from channel(s).
### Response:
def handle_part(self, params):
"""
Handle a client parting from channel(s).
"""
for pchannel in params.split(','):
if pchannel.strip() in self.server.channels:
# Send message to all clients in all channels user is in, and
# remove the user from the channels.
channel = self.server.channels.get(pchannel.strip())
response = ':%s PART :%s' % (self.client_ident(), pchannel)
if channel:
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.channels.pop(pchannel)
else:
_vars = self.server.servername, pchannel, pchannel
response = ':%s 403 %s :%s' % _vars
self.send_queue.append(response) |
def labels(self, leaves=True, internal=True):
'''Generator over the (non-``None``) ``Node`` labels of this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
'''
if not isinstance(leaves, bool):
raise TypeError("leaves must be a bool")
if not isinstance(internal, bool):
raise TypeError("internal must be a bool")
for node in self.traverse_preorder():
if node.label is not None and ((leaves and node.is_leaf()) or (internal and not node.is_leaf())):
yield node.label | Generator over the (non-``None``) ``Node`` labels of this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` | Below is the the instruction that describes the task:
### Input:
Generator over the (non-``None``) ``Node`` labels of this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
### Response:
def labels(self, leaves=True, internal=True):
'''Generator over the (non-``None``) ``Node`` labels of this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
'''
if not isinstance(leaves, bool):
raise TypeError("leaves must be a bool")
if not isinstance(internal, bool):
raise TypeError("internal must be a bool")
for node in self.traverse_preorder():
if node.label is not None and ((leaves and node.is_leaf()) or (internal and not node.is_leaf())):
yield node.label |
def getLocalTempDir(self):
"""
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
"""
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir)) | Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
### Response:
def getLocalTempDir(self):
"""
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
"""
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir)) |
def get_subclass_tree(cls, ensure_unique=True):
"""Returns all subclasses (direct and recursive) of cls."""
subclasses = []
# cls.__subclasses__() fails on classes inheriting from type
for subcls in type.__subclasses__(cls):
subclasses.append(subcls)
subclasses.extend(get_subclass_tree(subcls, ensure_unique))
return list(set(subclasses)) if ensure_unique else subclasses | Returns all subclasses (direct and recursive) of cls. | Below is the the instruction that describes the task:
### Input:
Returns all subclasses (direct and recursive) of cls.
### Response:
def get_subclass_tree(cls, ensure_unique=True):
"""Returns all subclasses (direct and recursive) of cls."""
subclasses = []
# cls.__subclasses__() fails on classes inheriting from type
for subcls in type.__subclasses__(cls):
subclasses.append(subcls)
subclasses.extend(get_subclass_tree(subcls, ensure_unique))
return list(set(subclasses)) if ensure_unique else subclasses |
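A tiny illustration of the recursive walk (assumes the function above is in scope; element order is arbitrary because of the set round-trip when ensure_unique is True):

class Base(object):
    pass

class Child(Base):
    pass

class GrandChild(Child):
    pass

print(get_subclass_tree(Base))   # [Child, GrandChild] in some order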
def remove(self, item):
"""
See :meth:`~pluginsmanager.observer.observable_list.ObservableList.remove()` method
"""
self.real_list.remove(item)
self._items.remove(item) | See :meth:`~pluginsmanager.observer.observable_list.ObservableList.remove()` method | Below is the the instruction that describes the task:
### Input:
See :meth:`~pluginsmanager.observer.observable_list.ObservableList.remove()` method
### Response:
def remove(self, item):
"""
See :meth:`~pluginsmanager.observer.observable_list.ObservableList.remove()` method
"""
self.real_list.remove(item)
self._items.remove(item) |
def typeseq(types):
"""
Returns an escape for a terminal text formatting type, or a list of types.
Valid types are:
* 'i' for 'italic'
* 'b' for 'bold'
* 'u' for 'underline'
* 'r' for 'reverse'
"""
ret = ""
for t in types:
ret += termcap.get(fmttypes[t])
return ret | Returns an escape for a terminal text formatting type, or a list of types.
Valid types are:
* 'i' for 'italic'
* 'b' for 'bold'
* 'u' for 'underline'
* 'r' for 'reverse' | Below is the the instruction that describes the task:
### Input:
Returns an escape for a terminal text formatting type, or a list of types.
Valid types are:
* 'i' for 'italic'
* 'b' for 'bold'
* 'u' for 'underline'
* 'r' for 'reverse'
### Response:
def typeseq(types):
"""
Returns an escape for a terminal text formatting type, or a list of types.
Valid types are:
* 'i' for 'italic'
* 'b' for 'bold'
* 'u' for 'underline'
* 'r' for 'reverse'
"""
ret = ""
for t in types:
ret += termcap.get(fmttypes[t])
return ret |
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(*datasets, join='outer', copy=False, exclude=[dim])
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in ds.variables.items():
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == 'identical' and not utils.dict_equiv(
v.attrs, result_vars[k].attrs):
raise ValueError(
'variable %s not identical across datasets' % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError(
'variable %s not equal across datasets' % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
# add concat dimension last to ensure that it's in the final Dataset
result[coord.name] = coord
return result | Concatenate a sequence of datasets along a new or existing dimension | Below is the the instruction that describes the task:
### Input:
Concatenate a sequence of datasets along a new or existing dimension
### Response:
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(*datasets, join='outer', copy=False, exclude=[dim])
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in ds.variables.items():
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == 'identical' and not utils.dict_equiv(
v.attrs, result_vars[k].attrs):
raise ValueError(
'variable %s not identical across datasets' % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError(
'variable %s not equal across datasets' % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
# add concat dimension last to ensure that it's in the final Dataset
result[coord.name] = coord
return result |
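The helper above is xarray-internal; the public entry point with the same behaviour is xarray.concat. A short sketch with two hypothetical datasets:

import numpy as np
import xarray as xr

ds1 = xr.Dataset({'t': ('time', np.zeros(3))}, coords={'time': [0, 1, 2]})
ds2 = xr.Dataset({'t': ('time', np.ones(3))}, coords={'time': [3, 4, 5]})
combined = xr.concat([ds1, ds2], dim='time')   # dataset inputs end up in a helper like _dataset_concat
print(combined.dims)                           # mapping with {'time': 6}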
def make_set(value):
''' Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list.
'''
if isinstance(value, list):
value = set(value)
elif not isinstance(value, set):
value = set([value,])
return value | Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list. | Below is the the instruction that describes the task:
### Input:
Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list.
### Response:
def make_set(value):
''' Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list.
'''
if isinstance(value, list):
value = set(value)
elif not isinstance(value, set):
value = set([value,])
return value |
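A quick usage sketch showing why the wrapper matters (assumes make_set from the record above is in scope):

print(make_set('setvalue'))        # {'setvalue'}  -- the string is kept whole, not split into characters
print(make_set(['a', 'b', 'a']))   # {'a', 'b'}
print(make_set({'x'}))             # {'x'}  -- already a set, returned unchanged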
def _request_pre_prepare(self, three_pc_key: Tuple[int, int],
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
"""
Request preprepare
"""
recipients = self.primaryName
return self._request_three_phase_msg(three_pc_key,
self.requested_pre_prepares,
PREPREPARE,
recipients,
stash_data) | Request preprepare | Below is the the instruction that describes the task:
### Input:
Request preprepare
### Response:
def _request_pre_prepare(self, three_pc_key: Tuple[int, int],
stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
"""
Request preprepare
"""
recipients = self.primaryName
return self._request_three_phase_msg(three_pc_key,
self.requested_pre_prepares,
PREPREPARE,
recipients,
stash_data) |
def compare_string(self, expected_str, actual_str):
"""
Returns True if the two strings are equal, False otherwise
The time taken is independent of the number of characters that match
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths
"""
if len(expected_str) != len(actual_str):
return False
result = 0
for x, y in zip(expected_str, actual_str):
result |= ord(x) ^ ord(y)
return result == 0 | Returns True if the two strings are equal, False otherwise
The time taken is independent of the number of characters that match
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths | Below is the the instruction that describes the task:
### Input:
Returns True if the two strings are equal, False otherwise
The time taken is independent of the number of characters that match
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths
### Response:
def compare_string(self, expected_str, actual_str):
"""
Returns True if the two strings are equal, False otherwise
The time taken is independent of the number of characters that match
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths
"""
if len(expected_str) != len(actual_str):
return False
result = 0
for x, y in zip(expected_str, actual_str):
result |= ord(x) ^ ord(y)
return result == 0 |
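The method never touches self, so the idea is easy to exercise standalone; the standard library's hmac.compare_digest offers the same constant-time guarantee (sketch):

import hmac

def constant_time_equals(expected_str, actual_str):
    # same approach as above: XOR every character pair and OR the results together
    if len(expected_str) != len(actual_str):
        return False
    result = 0
    for x, y in zip(expected_str, actual_str):
        result |= ord(x) ^ ord(y)
    return result == 0

print(constant_time_equals('s3cret', 's3cret'))   # True
print(constant_time_equals('s3cret', 's3cref'))   # False
print(hmac.compare_digest('s3cret', 's3cret'))    # True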
def funcNrlTcMotPred(idxPrc,
varPixX,
varPixY,
NrlMdlChunk,
varNumTP,
aryBoxCar, # aryCond
path,
varNumNrlMdls,
varNumMtDrctn,
varPar,
queOut):
"""
Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included.
"""
# # if hd5 method is used: open file for reading
# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'
# hdf5_path = os.path.join(path, filename)
# fileH = tables.openFile(hdf5_path, mode='r')
# Output array with pRF model time courses at all modelled standard
# deviations for current pixel position:
aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn),
dtype='float32')
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 1:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumLoops = varNumNrlMdls/varPar
# Vector with pRF values at which to give status feedback:
vecStatus = np.linspace(0,
varNumLoops,
num=(varStsStpSze+1),
endpoint=True)
vecStatus = np.ceil(vecStatus)
vecStatus = vecStatus.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatusPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatusPrc = np.ceil(vecStatusPrc)
vecStatusPrc = vecStatusPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through all Gauss parameters that are in this chunk
for idx, NrlMdlTrpl in enumerate(NrlMdlChunk):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Status indicator:
if varCntSts02 == vecStatus[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatusPrc[varCntSts01]) +
' % --- ' +
str(vecStatus[varCntSts01]) +
' loops out of ' +
str(varNumLoops))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# x pos of Gauss model: NrlMdlTrpl[0]
# y pos of Gauss model: NrlMdlTrpl[1]
# std of Gauss model: NrlMdlTrpl[2]
# index of tng crv model: NrlMdlTrpl[3]
varTmpX = int(np.around(NrlMdlTrpl[0], 0))
varTmpY = int(np.around(NrlMdlTrpl[1], 0))
# Create pRF model (2D):
aryGauss = funcGauss2D(varPixX,
varPixY,
varTmpX,
varTmpY,
NrlMdlTrpl[2])
# Multiply pixel-wise box car model with Gaussian pRF models:
aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# neural time course model (i.e. not yet scaled for the size of
# the pRF).
aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1))
# Normalise the nrl time course model to the size of the pRF. This
# gives us the ratio of 'activation' of the pRF at each time point,
# or, in other words, the neural time course model.
aryNrlTcTmp = np.divide(aryNrlTcTmp,
np.sum(aryGauss, axis=(0, 1)))
# Put model time courses into the function's output array:
aryOut[idx, :, :] = aryNrlTcTmp
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
aryOut,
]
queOut.put(lstOut) | Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included. | Below is the the instruction that describes the task:
### Input:
Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included.
### Response:
def funcNrlTcMotPred(idxPrc,
varPixX,
varPixY,
NrlMdlChunk,
varNumTP,
aryBoxCar, # aryCond
path,
varNumNrlMdls,
varNumMtDrctn,
varPar,
queOut):
"""
Function for creating neural time course models.
This function should be used to create neural models if different
predictors for every motion direction are included.
"""
# # if hd5 method is used: open file for reading
# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'
# hdf5_path = os.path.join(path, filename)
# fileH = tables.openFile(hdf5_path, mode='r')
# Output array with pRF model time courses at all modelled standard
# deviations for current pixel position:
aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn),
dtype='float32')
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 1:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Number of pRF models to fit:
varNumLoops = varNumNrlMdls/varPar
# Vector with pRF values at which to give status feedback:
vecStatus = np.linspace(0,
varNumLoops,
num=(varStsStpSze+1),
endpoint=True)
vecStatus = np.ceil(vecStatus)
vecStatus = vecStatus.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatusPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatusPrc = np.ceil(vecStatusPrc)
vecStatusPrc = vecStatusPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# Loop through all Gauss parameters that are in this chunk
for idx, NrlMdlTrpl in enumerate(NrlMdlChunk):
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Status indicator:
if varCntSts02 == vecStatus[varCntSts01]:
# Prepare status message:
strStsMsg = ('---------Progress: ' +
str(vecStatusPrc[varCntSts01]) +
' % --- ' +
str(vecStatus[varCntSts01]) +
' loops out of ' +
str(varNumLoops))
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# x pos of Gauss model: NrlMdlTrpl[0]
# y pos of Gauss model: NrlMdlTrpl[1]
# std of Gauss model: NrlMdlTrpl[2]
# index of tng crv model: NrlMdlTrpl[3]
varTmpX = int(np.around(NrlMdlTrpl[0], 0))
varTmpY = int(np.around(NrlMdlTrpl[1], 0))
# Create pRF model (2D):
aryGauss = funcGauss2D(varPixX,
varPixY,
varTmpX,
varTmpY,
NrlMdlTrpl[2])
# Multiply pixel-wise box car model with Gaussian pRF models:
aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'. This is essentially an unscaled version of the
# neural time course model (i.e. not yet scaled for the size of
# the pRF).
aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1))
# Normalise the nrl time course model to the size of the pRF. This
# gives us the ratio of 'activation' of the pRF at each time point,
# or, in other words, the neural time course model.
aryNrlTcTmp = np.divide(aryNrlTcTmp,
np.sum(aryGauss, axis=(0, 1)))
# Put model time courses into the function's output array:
aryOut[idx, :, :] = aryNrlTcTmp
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 1:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Output list:
lstOut = [idxPrc,
aryOut,
]
queOut.put(lstOut) |
def wrap_with_try(self, node):
"""Wrap an ast node in a 'try' node to enter debug on exception."""
handlers = []
if self.ignore_exceptions is None:
handlers.append(ast.ExceptHandler(type=None,
name=None,
body=[ast.Raise()]))
else:
ignores_nodes = self.ignore_exceptions
handlers.append(ast.ExceptHandler(type=ast.Tuple(ignores_nodes,
ast.Load()),
name=None,
body=[ast.Raise()]))
if self.catch_exception is None or \
get_node_value(self.catch_exception) not in \
(get_node_value(ast_node)
for ast_node in self.ignore_exceptions):
call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
start_debug_cmd = ast.Expr(
value=ast.Call(ast.Name("start_debugging", ast.Load()),
[], [], *call_extra_parameters))
catch_exception_type = None
if self.catch_exception is not None:
catch_exception_type = self.catch_exception
handlers.append(ast.ExceptHandler(type=catch_exception_type,
name=None,
body=[start_debug_cmd]))
try_except_extra_params = {"finalbody": []} if IS_PYTHON_3 else {}
new_node = self.ast_try_except(orelse=[], body=[node],
handlers=handlers,
**try_except_extra_params)
return ast.copy_location(new_node, node) | Wrap an ast node in a 'try' node to enter debug on exception. | Below is the the instruction that describes the task:
### Input:
Wrap an ast node in a 'try' node to enter debug on exception.
### Response:
def wrap_with_try(self, node):
"""Wrap an ast node in a 'try' node to enter debug on exception."""
handlers = []
if self.ignore_exceptions is None:
handlers.append(ast.ExceptHandler(type=None,
name=None,
body=[ast.Raise()]))
else:
ignores_nodes = self.ignore_exceptions
handlers.append(ast.ExceptHandler(type=ast.Tuple(ignores_nodes,
ast.Load()),
name=None,
body=[ast.Raise()]))
if self.catch_exception is None or \
get_node_value(self.catch_exception) not in \
(get_node_value(ast_node)
for ast_node in self.ignore_exceptions):
call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
start_debug_cmd = ast.Expr(
value=ast.Call(ast.Name("start_debugging", ast.Load()),
[], [], *call_extra_parameters))
catch_exception_type = None
if self.catch_exception is not None:
catch_exception_type = self.catch_exception
handlers.append(ast.ExceptHandler(type=catch_exception_type,
name=None,
body=[start_debug_cmd]))
try_except_extra_params = {"finalbody": []} if IS_PYTHON_3 else {}
new_node = self.ast_try_except(orelse=[], body=[node],
handlers=handlers,
**try_except_extra_params)
return ast.copy_location(new_node, node) |
def build(self, builder):
"""Build XML by appending to builder"""
builder.start("CheckValue", {})
builder.data(str(self.value))
builder.end("CheckValue") | Build XML by appending to builder | Below is the the instruction that describes the task:
### Input:
Build XML by appending to builder
### Response:
def build(self, builder):
"""Build XML by appending to builder"""
builder.start("CheckValue", {})
builder.data(str(self.value))
builder.end("CheckValue") |
def _inner(x, y, axis=-1):
"""Patched version of :func:`sporco.linalg.inner`."""
return cp.sum(x * y, axis=axis, keepdims=True) | Patched version of :func:`sporco.linalg.inner`. | Below is the the instruction that describes the task:
### Input:
Patched version of :func:`sporco.linalg.inner`.
### Response:
def _inner(x, y, axis=-1):
"""Patched version of :func:`sporco.linalg.inner`."""
return cp.sum(x * y, axis=axis, keepdims=True) |
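A NumPy stand-in showing the shape behaviour (cupy's cp.sum mirrors numpy.sum, so the semantics carry over):

import numpy as np

x = np.arange(6.0).reshape(2, 3)
y = np.ones((2, 3))
print(np.sum(x * y, axis=-1, keepdims=True))   # [[ 3.] [12.]] -- row-wise inner products, shape (2, 1)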
def construct_channel(self, *args, **kwargs):
"""
Create ChannelNode and build topic tree.
"""
channel = self.get_channel(*args, **kwargs) # creates ChannelNode from data in self.channel_info
_build_tree(channel, SAMPLE_TREE)
raise_for_invalid_channel(channel)
return channel | Create ChannelNode and build topic tree. | Below is the the instruction that describes the task:
### Input:
Create ChannelNode and build topic tree.
### Response:
def construct_channel(self, *args, **kwargs):
"""
Create ChannelNode and build topic tree.
"""
channel = self.get_channel(*args, **kwargs) # creates ChannelNode from data in self.channel_info
_build_tree(channel, SAMPLE_TREE)
raise_for_invalid_channel(channel)
return channel |
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph | Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned. | Below is the the instruction that describes the task:
### Input:
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
### Response:
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph |
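The conversion reduces to weighted add_edge calls; a standalone sketch with hypothetical OLSR link data and a plain nx.Graph in place of whatever self._init_graph() returns:

import networkx as nx

data = [
    {'source': '10.0.0.1', 'target': '10.0.0.2', 'cost': 1.0},
    {'source': '10.0.0.2', 'target': '10.0.0.3', 'cost': 2.5},
]
graph = nx.Graph()
for link in data:
    graph.add_edge(link['source'], link['target'], weight=link['cost'])
print(list(graph.edges(data=True)))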
def set_position_target_local_ned_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
'''
Sets a desired vehicle position in a local north-east-down coordinate
frame. Used by an external controller to command the
vehicle (manual controller or other system).
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return self.send(self.set_position_target_local_ned_encode(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) | Sets a desired vehicle position in a local north-east-down coordinate
frame. Used by an external controller to command the
vehicle (manual controller or other system).
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float) | Below is the the instruction that describes the task:
### Input:
Sets a desired vehicle position in a local north-east-down coordinate
frame. Used by an external controller to command the
vehicle (manual controller or other system).
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
### Response:
def set_position_target_local_ned_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
'''
Sets a desired vehicle position in a local north-east-down coordinate
frame. Used by an external controller to command the
vehicle (manual controller or other system).
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return self.send(self.set_position_target_local_ned_encode(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) |
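A hedged helper for composing type_mask from the bit numbering given in the docstring above (a common "position setpoint only" mask; bit 10 is left clear so afx/afy/afz would still mean acceleration):

# ignore velocity (bits 4-6), acceleration (bits 7-9), yaw (bit 11) and yaw rate (bit 12)
ignore_bits = [4, 5, 6, 7, 8, 9, 11, 12]
type_mask = sum(1 << (bit - 1) for bit in ignore_bits)
print(type_mask, bin(type_mask))   # 3576 0b110111111000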
def user(self):
"""Creates a User object when requested."""
try:
return self._user
except AttributeError:
self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid))
return self._user | Creates a User object when requested. | Below is the the instruction that describes the task:
### Input:
Creates a User object when requested.
### Response:
def user(self):
"""Creates a User object when requested."""
try:
return self._user
except AttributeError:
self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid))
return self._user |
def set(self, name: str, value: str) -> None:
"""
Rewrite a header in the request; not recommended
"""
name = name.casefold()
self._headers[name] = value | Rewrite a header in the request; not recommended | Below is the the instruction that describes the task:
### Input:
Rewrite a header in the request; not recommended
### Response:
def set(self, name: str, value: str) -> None:
"""
Rewrite a header in the request; not recommended
"""
name = name.casefold()
self._headers[name] = value |
def _buildArgs(f, self=None, kwargs={}):
"""
Get the default arguments from the function and assign as instance vars.
Return a list of 3-tuples with (name, description, defaultValue) for each
argument to the function.
Assigns all arguments to the function as instance variables of TMRegion.
If the argument was not provided, uses the default value.
Pops any values from kwargs that go to the function.
"""
# Get the name, description, and default value for each argument
argTuples = getArgumentDescriptions(f)
argTuples = argTuples[1:] # Remove 'self'
# Get the names of the parameters to our own constructor and remove them
# Check for _original_init first, because if LockAttributesMixin is used,
# __init__'s signature will be just (self, *args, **kw), but
# _original_init is created with the original signature
#init = getattr(self, '_original_init', self.__init__)
init = TMRegion.__init__
ourArgNames = [t[0] for t in getArgumentDescriptions(init)]
# Also remove a few other names that aren't in our constructor but are
# computed automatically (e.g. numberOfCols for the TM)
ourArgNames += [
'numberOfCols', # TM
]
for argTuple in argTuples[:]:
if argTuple[0] in ourArgNames:
argTuples.remove(argTuple)
# Build the dictionary of arguments
if self:
for argTuple in argTuples:
argName = argTuple[0]
if argName in kwargs:
# Argument was provided
argValue = kwargs.pop(argName)
else:
# Argument was not provided; use the default value if there is one, and
# raise an exception otherwise
if len(argTuple) == 2:
# No default value
raise TypeError("Must provide '%s'" % argName)
argValue = argTuple[2]
# Set as an instance variable if 'self' was passed in
setattr(self, argName, argValue)
return argTuples | Get the default arguments from the function and assign as instance vars.
Return a list of 3-tuples with (name, description, defaultValue) for each
argument to the function.
Assigns all arguments to the function as instance variables of TMRegion.
If the argument was not provided, uses the default value.
Pops any values from kwargs that go to the function. | Below is the the instruction that describes the task:
### Input:
Get the default arguments from the function and assign as instance vars.
Return a list of 3-tuples with (name, description, defaultValue) for each
argument to the function.
Assigns all arguments to the function as instance variables of TMRegion.
If the argument was not provided, uses the default value.
Pops any values from kwargs that go to the function.
### Response:
def _buildArgs(f, self=None, kwargs={}):
"""
Get the default arguments from the function and assign as instance vars.
Return a list of 3-tuples with (name, description, defaultValue) for each
argument to the function.
Assigns all arguments to the function as instance variables of TMRegion.
If the argument was not provided, uses the default value.
Pops any values from kwargs that go to the function.
"""
# Get the name, description, and default value for each argument
argTuples = getArgumentDescriptions(f)
argTuples = argTuples[1:] # Remove 'self'
# Get the names of the parameters to our own constructor and remove them
# Check for _original_init first, because if LockAttributesMixin is used,
# __init__'s signature will be just (self, *args, **kw), but
# _original_init is created with the original signature
#init = getattr(self, '_original_init', self.__init__)
init = TMRegion.__init__
ourArgNames = [t[0] for t in getArgumentDescriptions(init)]
# Also remove a few other names that aren't in our constructor but are
# computed automatically (e.g. numberOfCols for the TM)
ourArgNames += [
'numberOfCols', # TM
]
for argTuple in argTuples[:]:
if argTuple[0] in ourArgNames:
argTuples.remove(argTuple)
# Build the dictionary of arguments
if self:
for argTuple in argTuples:
argName = argTuple[0]
if argName in kwargs:
# Argument was provided
argValue = kwargs.pop(argName)
else:
# Argument was not provided; use the default value if there is one, and
# raise an exception otherwise
if len(argTuple) == 2:
# No default value
raise TypeError("Must provide '%s'" % argName)
argValue = argTuple[2]
# Set as an instance variable if 'self' was passed in
setattr(self, argName, argValue)
return argTuples |
def _grep_qstat(self, status_type='complete'):
""" Greps qstat -e <job_id> for information from the queue.
:param status_type: complete, queued, running, error, gone
"""
args = "qstat -e {}".format(self.id).split()
res, _ = call(args)
if res == '': return False
res = res.split('\n')[2].split()[4]
if status_type == 'complete' and res == 'C':
return True
elif status_type == 'error' and (res == 'E' or res == 'C'):
return True
elif status_type == 'running' and res == 'R':
return True
elif status_type == 'queued' and res == 'Q':
return True
elif status_type == 'gone' and 'unknown job id' in str(res).lower():
return True
else:
return False | Greps qstat -e <job_id> for information from the queue.
:param status_type: complete, queued, running, error, gone | Below is the the instruction that describes the task:
### Input:
Greps qstat -e <job_id> for information from the queue.
:param status_type: complete, queued, running, error, gone
### Response:
def _grep_qstat(self, status_type='complete'):
""" Greps qstat -e <job_id> for information from the queue.
:param status_type: complete, queued, running, error, gone
"""
args = "qstat -e {}".format(self.id).split()
res, _ = call(args)
if res == '': return False
res = res.split('\n')[2].split()[4]
if status_type == 'complete' and res == 'C':
return True
elif status_type == 'error' and (res == 'E' or res == 'C'):
return True
elif status_type == 'running' and res == 'R':
return True
elif status_type == 'queued' and res == 'Q':
return True
elif status_type == 'gone' and 'unknown job id' in str(res).lower():
return True
else:
return False |
def baba_panel_plot(
ttree,
tests,
boots,
show_tip_labels=True,
show_test_labels=True,
use_edge_lengths=False,
collapse_outgroup=False,
pct_tree_x=0.4,
pct_tree_y=0.2,
alpha=3.0,
*args,
**kwargs):
"""
signature...
"""
## create Panel plot object and set height & width
bootsarr = np.array(boots)
panel = Panel(ttree, tests, bootsarr, alpha)
if not kwargs.get("width"):
panel.kwargs["width"] = min(1000, 50*len(panel.tree))
if not kwargs.get("height"):
panel.kwargs["height"] = min(1000, 50*len(panel.tests))
## update defaults with kwargs & update size based on ntips & ntests
kwargs.update(dict(pct_tree_x=pct_tree_x, pct_tree_y=pct_tree_y))
panel.kwargs.update(kwargs)
## create a canvas and a single cartesian coord system
canvas = toyplot.Canvas(height=panel.kwargs['height'], width=panel.kwargs['width'])
axes = canvas.cartesian(bounds=("10%", "90%", "5%", "95%"))
axes.show = False
## add panels to axes
panel.panel_tree(axes)
panel.panel_test(axes)
panel.panel_tip_labels(axes)
if isinstance(boots, np.ndarray):
panel.panel_results(axes)
return canvas, axes, panel | signature... | Below is the the instruction that describes the task:
### Input:
signature...
### Response:
def baba_panel_plot(
ttree,
tests,
boots,
show_tip_labels=True,
show_test_labels=True,
use_edge_lengths=False,
collapse_outgroup=False,
pct_tree_x=0.4,
pct_tree_y=0.2,
alpha=3.0,
*args,
**kwargs):
"""
signature...
"""
## create Panel plot object and set height & width
bootsarr = np.array(boots)
panel = Panel(ttree, tests, bootsarr, alpha)
if not kwargs.get("width"):
panel.kwargs["width"] = min(1000, 50*len(panel.tree))
if not kwargs.get("height"):
panel.kwargs["height"] = min(1000, 50*len(panel.tests))
## update defaults with kwargs & update size based on ntips & ntests
kwargs.update(dict(pct_tree_x=pct_tree_x, pct_tree_y=pct_tree_y))
panel.kwargs.update(kwargs)
## create a canvas and a single cartesian coord system
canvas = toyplot.Canvas(height=panel.kwargs['height'], width=panel.kwargs['width'])
axes = canvas.cartesian(bounds=("10%", "90%", "5%", "95%"))
axes.show = False
## add panels to axes
panel.panel_tree(axes)
panel.panel_test(axes)
panel.panel_tip_labels(axes)
if isinstance(boots, np.ndarray):
panel.panel_results(axes)
return canvas, axes, panel |
def _element(cls):
''' find the element with controls '''
if not cls.__is_selector():
raise Exception("Invalid selector[%s]." %cls.__control["by"])
driver = Web.driver
try:
elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
except:
raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
if len(elements) < cls.__control["index"] + 1:
raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"]))
if len(elements) > 1:
print("Element [%s]: There are [%d] elements, choosed index=%d" %(cls.__name__,len(elements),cls.__control["index"]))
elm = elements[cls.__control["index"]]
cls.__control["index"] = 0
return elm | find the element with controls | Below is the the instruction that describes the task:
### Input:
find the element with controls
### Response:
def _element(cls):
''' find the element with controls '''
if not cls.__is_selector():
raise Exception("Invalid selector[%s]." %cls.__control["by"])
driver = Web.driver
try:
elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
except:
raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
if len(elements) < cls.__control["index"] + 1:
raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"]))
if len(elements) > 1:
print("Element [%s]: There are [%d] elements, choosed index=%d" %(cls.__name__,len(elements),cls.__control["index"]))
elm = elements[cls.__control["index"]]
cls.__control["index"] = 0
return elm |
def copen(fileobj, mode='rb', **kwargs):
"""Detects and opens compressed file for reading and writing.
Args:
fileobj (File): any File-like object supported by an underlying
compression algorithm
mode (unicode): mode to open fileobj with
**kwargs: keyword-arguments to pass to the compression algorithm
Returns:
File: TextWrapper if no compression, else returns appropriate
wrapper for the compression type
Example:
.. code-block:: Python
>>> from tempfile import NamedTemporaryFile
>>> # Write compressed file
>>> temp = NamedTemporaryFile(delete=False, suffix='.bz2')
>>> test_bz2 = copen(temp.name, 'wb')
>>> test_bz2.write(b'bzip2')
>>> test_bz2.close()
>>> # Read compressed bzip file
>>> test_bz2 = copen(temp.name, 'rb')
>>> test_bz2.read()
b'bzip2'
"""
algo = io.open # Only used as io.open in write mode
mode = mode.lower().strip()
modules = {} # Later populated by compression algorithms
write_mode = False if mode.lstrip('U')[0] == 'r' else True
kwargs['mode'] = mode
# Currently supported compression algorithms
modules_to_import = {
'bz2': 'BZ2File',
'gzip': 'GzipFile',
'lzma': 'LZMAFile'
}
# Dynamically import compression libraries and warn about failures
for mod, _class in modules_to_import.items():
try:
modules[_class] = getattr(import_module(mod), _class)
except (ImportError, AttributeError) as e:
modules[_class] = open
warn('Cannot process {0} files due to following error:'
'{1}{2}{1}You will need to install the {0} library to '
'properly use these files. Currently, such files will '
'open in "text" mode.'.format(mod, linesep, e))
# Write mode
if write_mode is True:
# Map file extensions to decompression classes
algo_map = {
'bz2': modules['BZ2File'],
'gz': modules['GzipFile'],
'xz': modules['LZMAFile']
}
# Determine the compression algorithm via the file extension
ext = fileobj.split('.')[-1]
try:
algo = algo_map[ext]
except KeyError:
pass
# Read mode
else:
algo = io.TextIOWrapper # Default to plaintext buffer
# Magic headers of compression formats
file_sigs = {
b'\x42\x5a\x68': modules['BZ2File'],
b'\x1f\x8b\x08': modules['GzipFile'],
b'\xfd7zXZ\x00': modules['LZMAFile']
}
# Open the file, buffer it, and identify the compression algorithm
fileobj = io.BufferedReader(io.open(fileobj, 'rb'))
max_len = max(len(x) for x in file_sigs.keys())
start = fileobj.peek(max_len)
for sig in file_sigs.keys():
if start.startswith(sig):
algo = file_sigs[sig]
break # Stop iterating once a good signature is found
# Filter all **kwargs by the args accepted by the compression algorithm
algo_args = set(getfullargspec(algo).args)
good_args = set(kwargs.keys()).intersection(algo_args)
_kwargs = {arg: kwargs[arg] for arg in good_args}
# Open the file using parameters defined above and store in namespace
if write_mode is True:
handle = algo(fileobj, **_kwargs)
else:
try: # For algorithms that need to be explicitly given a fileobj
handle = algo(fileobj=fileobj, **_kwargs)
except TypeError: # For algorithms that detect file objects
handle = algo(fileobj, **_kwargs)
return handle | Detects and opens compressed file for reading and writing.
Args:
fileobj (File): any File-like object supported by an underlying
compression algorithm
mode (unicode): mode to open fileobj with
**kwargs: keyword-arguments to pass to the compression algorithm
Returns:
File: TextWrapper if no compression, else returns appropriate
wrapper for the compression type
Example:
.. code-block:: Python
>>> from tempfile import NamedTemporaryFile
>>> # Write compressed file
>>> temp = NamedTemporaryFile(delete=False, suffix='.bz2')
>>> test_bz2 = copen(temp.name, 'wb')
>>> test_bz2.write(b'bzip2')
>>> test_bz2.close()
>>> # Read compressed bzip file
>>> test_bz2 = copen(temp.name, 'rb')
>>> test_bz2.read()
b'bzip2' | Below is the the instruction that describes the task:
### Input:
Detects and opens compressed file for reading and writing.
Args:
fileobj (File): any File-like object supported by an underlying
compression algorithm
mode (unicode): mode to open fileobj with
**kwargs: keyword-arguments to pass to the compression algorithm
Returns:
File: TextWrapper if no compression, else returns appropriate
wrapper for the compression type
Example:
.. code-block:: Python
>>> from tempfile import NamedTemporaryFile
>>> # Write compressed file
>>> temp = NamedTemporaryFile(delete=False, suffix='.bz2')
>>> test_bz2 = copen(temp.name, 'wb')
>>> test_bz2.write(b'bzip2')
>>> test_bz2.close()
>>> # Read compressed bzip file
>>> test_bz2 = copen(temp.name, 'rb')
>>> test_bz2.read()
b'bzip2'
### Response:
def copen(fileobj, mode='rb', **kwargs):
"""Detects and opens compressed file for reading and writing.
Args:
fileobj (File): any File-like object supported by an underlying
compression algorithm
mode (unicode): mode to open fileobj with
**kwargs: keyword-arguments to pass to the compression algorithm
Returns:
File: TextWrapper if no compression, else returns appropriate
wrapper for the compression type
Example:
.. code-block:: Python
>>> from tempfile import NamedTemporaryFile
>>> # Write compressed file
>>> temp = NamedTemporaryFile(delete=False, suffix='.bz2')
>>> test_bz2 = copen(temp.name, 'wb')
>>> test_bz2.write(b'bzip2')
>>> test_bz2.close()
>>> # Read compressed bzip file
>>> test_bz2 = copen(temp.name, 'rb')
>>> test_bz2.read()
b'bzip2'
"""
algo = io.open # Only used as io.open in write mode
mode = mode.lower().strip()
modules = {} # Later populated by compression algorithms
write_mode = False if mode.lstrip('U')[0] == 'r' else True
kwargs['mode'] = mode
# Currently supported compression algorithms
modules_to_import = {
'bz2': 'BZ2File',
'gzip': 'GzipFile',
'lzma': 'LZMAFile'
}
# Dynamically import compression libraries and warn about failures
for mod, _class in modules_to_import.items():
try:
modules[_class] = getattr(import_module(mod), _class)
except (ImportError, AttributeError) as e:
modules[_class] = open
warn('Cannot process {0} files due to following error:'
'{1}{2}{1}You will need to install the {0} library to '
'properly use these files. Currently, such files will '
'open in "text" mode.'.format(mod, linesep, e))
# Write mode
if write_mode is True:
# Map file extensions to decompression classes
algo_map = {
'bz2': modules['BZ2File'],
'gz': modules['GzipFile'],
'xz': modules['LZMAFile']
}
# Determine the compression algorithm via the file extension
ext = fileobj.split('.')[-1]
try:
algo = algo_map[ext]
except KeyError:
pass
# Read mode
else:
algo = io.TextIOWrapper # Default to plaintext buffer
        # Magic headers of compression formats
file_sigs = {
b'\x42\x5a\x68': modules['BZ2File'],
b'\x1f\x8b\x08': modules['GzipFile'],
b'\xfd7zXZ\x00': modules['LZMAFile']
}
# Open the file, buffer it, and identify the compression algorithm
fileobj = io.BufferedReader(io.open(fileobj, 'rb'))
max_len = max(len(x) for x in file_sigs.keys())
start = fileobj.peek(max_len)
for sig in file_sigs.keys():
if start.startswith(sig):
algo = file_sigs[sig]
break # Stop iterating once a good signature is found
# Filter all **kwargs by the args accepted by the compression algorithm
algo_args = set(getfullargspec(algo).args)
good_args = set(kwargs.keys()).intersection(algo_args)
_kwargs = {arg: kwargs[arg] for arg in good_args}
# Open the file using parameters defined above and store in namespace
if write_mode is True:
handle = algo(fileobj, **_kwargs)
else:
try: # For algorithms that need to be explicitly given a fileobj
handle = algo(fileobj=fileobj, **_kwargs)
except TypeError: # For algorithms that detect file objects
handle = algo(fileobj, **_kwargs)
return handle |
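A minimal usage sketch for copen() above, assuming the function is importable in the current scope; it mirrors the bz2 doctest but exercises the gzip path, where read mode picks the algorithm from the magic header rather than the file extension.
```python
# Hedged usage sketch: assumes copen() from above is in scope.
import gzip
from tempfile import NamedTemporaryFile

temp = NamedTemporaryFile(delete=False, suffix='.gz')
with gzip.open(temp.name, 'wb') as writer:
    writer.write(b'gzip data')

reader = copen(temp.name, 'rb')  # sniffs b'\x1f\x8b\x08' and wraps the stream in GzipFile
assert reader.read() == b'gzip data'
reader.close()
```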
def rgb_to_hsv(rgb):
"""
Convert an RGB color representation to an HSV color representation.
(r, g, b) :: r -> [0, 255]
g -> [0, 255]
b -> [0, 255]
:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.
:return: HSV representation of the input RGB value.
:rtype: tuple
"""
r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255
_min = min(r, g, b)
_max = max(r, g, b)
v = _max
delta = _max - _min
if _max == 0:
return 0, 0, v
s = delta / _max
if delta == 0:
delta = 1
if r == _max:
h = 60 * (((g - b) / delta) % 6)
elif g == _max:
h = 60 * (((b - r) / delta) + 2)
else:
h = 60 * (((r - g) / delta) + 4)
return round(h, 3), round(s, 3), round(v, 3) | Convert an RGB color representation to an HSV color representation.
(r, g, b) :: r -> [0, 255]
g -> [0, 255]
b -> [0, 255]
:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.
:return: HSV representation of the input RGB value.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Convert an RGB color representation to an HSV color representation.
(r, g, b) :: r -> [0, 255]
g -> [0, 255]
b -> [0, 255]
:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.
:return: HSV representation of the input RGB value.
:rtype: tuple
### Response:
def rgb_to_hsv(rgb):
"""
Convert an RGB color representation to an HSV color representation.
(r, g, b) :: r -> [0, 255]
g -> [0, 255]
b -> [0, 255]
:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.
:return: HSV representation of the input RGB value.
:rtype: tuple
"""
r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255
_min = min(r, g, b)
_max = max(r, g, b)
v = _max
delta = _max - _min
if _max == 0:
return 0, 0, v
s = delta / _max
if delta == 0:
delta = 1
if r == _max:
h = 60 * (((g - b) / delta) % 6)
elif g == _max:
h = 60 * (((b - r) / delta) + 2)
else:
h = 60 * (((r - g) / delta) + 4)
return round(h, 3), round(s, 3), round(v, 3) |
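A few spot checks for rgb_to_hsv(); the expected tuples follow directly from the formula and its two guard branches (black short-circuits on _max == 0, grey hits the delta == 0 fallback).
```python
# Sanity checks, assuming rgb_to_hsv() above is in scope.
print(rgb_to_hsv((255, 0, 0)))      # (0.0, 1.0, 1.0)   pure red
print(rgb_to_hsv((0, 0, 0)))        # (0, 0, 0.0)       black: early return when _max == 0
print(rgb_to_hsv((128, 128, 128)))  # (0.0, 0.0, 0.502) grey: delta == 0, hue defaults to 0
```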
def _build_generator_list(network):
"""Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators
"""
genos_mv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv_agg = pd.DataFrame(columns=
('la_id', 'id', 'obj'))
# MV genos
for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
la_id = int(geno.id.split('-')[1].split('_')[-1])
genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
return genos_mv, genos_lv, genos_lv_agg | Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators | Below is the the instruction that describes the task:
### Input:
Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators
### Response:
def _build_generator_list(network):
"""Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators
"""
genos_mv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv_agg = pd.DataFrame(columns=
('la_id', 'id', 'obj'))
# MV genos
for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
la_id = int(geno.id.split('-')[1].split('_')[-1])
genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
return genos_mv, genos_lv, genos_lv_agg |
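A runnable end-to-end example would need an eDisGo network object, which is not available here; the snippet below only illustrates the DataFrame row-append idiom the helper relies on, with invented ids and placeholder objects.
```python
# Standalone illustration of the row-append pattern used above (values are made up).
import pandas as pd

genos = pd.DataFrame(columns=('id', 'obj'))
for geno_id, geno_obj in [(1, 'generator_1'), (2, 'generator_2')]:
    genos.loc[len(genos)] = [int(geno_id), geno_obj]
print(genos)  # two rows with columns 'id' and 'obj'
```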
def cmd_tool(args=None):
""" Command line tool to make a md5sum comparison of two .fil files. """
if 'bl' in local_host:
header_loc = '/usr/local/sigproc/bin/header' #Current location of header command in GBT.
else:
raise IOError('Script only able to run in BL systems.')
p = OptionParser()
p.set_usage('matchfils <FIL_FILE1> <FIL_FILE2>')
opts, args = p.parse_args(sys.argv[1:])
file1 = args[0]
file2 = args[1]
#------------------------------------
#Create batch script
make_batch_script()
#------------------------------------
#First checksum
headersize1 = find_header_size(file1)
file_size1 = os.path.getsize(file1)
#Strip header from file, and calculate the md5sum of the rest.
#command=['tail','-c',str(file_size1-headersize1),file1,'|','md5sum']
command=['./tail_sum.sh',file1,str(file_size1-headersize1)]
print('[matchfils] '+' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
check_sum1 = out.split()[0]
print('[matchfils] Checksum is:', check_sum1)
if err:
raise Error('There is an error.')
#---
out,err = reset_outs()
command=[header_loc,file1]
print('[matchfils] Header information:')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
header1 = out
print(header1)
#------------------------------------
#Second checksum
out,err = reset_outs()
headersize2 = find_header_size(file2)
file_size2 = os.path.getsize(file2)
#Strip header from file, and calculate the md5sum of the rest.
command=['./tail_sum.sh',file2,str(file_size2-headersize2)]
print('[matchfils] '+' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
check_sum2 = out.split()[0]
print('[matchfils] Checksum is:', check_sum2)
if err:
raise Error('There is an error.')
#---
out,err = reset_outs()
command=[header_loc,file2]
print('[matchfils] Header information:')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
header2 = out
print(header2)
#------------------------------------
#check the checksums
if check_sum1 != check_sum2:
print('[matchfils] Booo! Checksum does not match between files.')
else:
print('[matchfils] Hooray! Checksum matches between files.')
#------------------------------------
#Remove batch script
os.remove('tail_sum.sh') | Command line tool to make a md5sum comparison of two .fil files. | Below is the the instruction that describes the task:
### Input:
Command line tool to make a md5sum comparison of two .fil files.
### Response:
def cmd_tool(args=None):
""" Command line tool to make a md5sum comparison of two .fil files. """
if 'bl' in local_host:
header_loc = '/usr/local/sigproc/bin/header' #Current location of header command in GBT.
else:
raise IOError('Script only able to run in BL systems.')
p = OptionParser()
p.set_usage('matchfils <FIL_FILE1> <FIL_FILE2>')
opts, args = p.parse_args(sys.argv[1:])
file1 = args[0]
file2 = args[1]
#------------------------------------
#Create batch script
make_batch_script()
#------------------------------------
#First checksum
headersize1 = find_header_size(file1)
file_size1 = os.path.getsize(file1)
#Strip header from file, and calculate the md5sum of the rest.
#command=['tail','-c',str(file_size1-headersize1),file1,'|','md5sum']
command=['./tail_sum.sh',file1,str(file_size1-headersize1)]
print('[matchfils] '+' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
check_sum1 = out.split()[0]
print('[matchfils] Checksum is:', check_sum1)
if err:
raise Error('There is an error.')
#---
out,err = reset_outs()
command=[header_loc,file1]
print('[matchfils] Header information:')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
header1 = out
print(header1)
#------------------------------------
#Second checksum
out,err = reset_outs()
headersize2 = find_header_size(file2)
file_size2 = os.path.getsize(file2)
#Strip header from file, and calculate the md5sum of the rest.
command=['./tail_sum.sh',file2,str(file_size2-headersize2)]
print('[matchfils] '+' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
check_sum2 = out.split()[0]
print('[matchfils] Checksum is:', check_sum2)
if err:
raise Error('There is an error.')
#---
out,err = reset_outs()
command=[header_loc,file2]
print('[matchfils] Header information:')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
header2 = out
print(header2)
#------------------------------------
#check the checksums
if check_sum1 != check_sum2:
print('[matchfils] Booo! Checksum does not match between files.')
else:
print('[matchfils] Hooray! Checksum matches between files.')
#------------------------------------
#Remove batch script
os.remove('tail_sum.sh') |
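The shell pipeline written by make_batch_script ('tail -c N FILE | md5sum') can be approximated in pure Python; the sketch below is illustrative only and is not part of the tool, which shells out so it can stream very large .fil files.
```python
# Rough pure-Python equivalent of the header-stripped md5 step (illustrative only).
import hashlib

def md5_after_header(path, headersize, chunk_size=1 << 20):
    md5 = hashlib.md5()
    with open(path, 'rb') as fil:
        fil.seek(headersize)  # skip the sigproc header, like 'tail -c' does
        for chunk in iter(lambda: fil.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()
```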
def synchronized(obj):
"""
This function has two purposes:
1. Decorate a function that automatically synchronizes access to the object
passed as the first argument (usually `self`, for member methods)
2. Synchronize access to the object, used in a `with`-statement.
Note that you can use #wait(), #notify() and #notify_all() only on
synchronized objects.
# Example
```python
class Box(Synchronizable):
def __init__(self):
self.value = None
@synchronized
def get(self):
return self.value
@synchronized
def set(self, value):
self.value = value
box = Box()
box.set('foobar')
with synchronized(box):
box.value = 'taz\'dingo'
print(box.get())
```
# Arguments
obj (Synchronizable, function): The object to synchronize access to, or a
function to decorate.
# Returns
1. The decorated function.
2. The value of `obj.synchronizable_condition`, which should implement the
context-manager interface (to be used in a `with`-statement).
"""
if hasattr(obj, 'synchronizable_condition'):
return obj.synchronizable_condition
elif callable(obj):
@functools.wraps(obj)
def wrapper(self, *args, **kwargs):
with self.synchronizable_condition:
return obj(self, *args, **kwargs)
return wrapper
else:
raise TypeError('expected Synchronizable instance or callable to decorate') | This function has two purposes:
1. Decorate a function that automatically synchronizes access to the object
passed as the first argument (usually `self`, for member methods)
2. Synchronize access to the object, used in a `with`-statement.
Note that you can use #wait(), #notify() and #notify_all() only on
synchronized objects.
# Example
```python
class Box(Synchronizable):
def __init__(self):
self.value = None
@synchronized
def get(self):
return self.value
@synchronized
def set(self, value):
self.value = value
box = Box()
box.set('foobar')
with synchronized(box):
box.value = 'taz\'dingo'
print(box.get())
```
# Arguments
obj (Synchronizable, function): The object to synchronize access to, or a
function to decorate.
# Returns
1. The decorated function.
2. The value of `obj.synchronizable_condition`, which should implement the
context-manager interface (to be used in a `with`-statement). | Below is the the instruction that describes the task:
### Input:
This function has two purposes:
1. Decorate a function that automatically synchronizes access to the object
passed as the first argument (usually `self`, for member methods)
2. Synchronize access to the object, used in a `with`-statement.
Note that you can use #wait(), #notify() and #notify_all() only on
synchronized objects.
# Example
```python
class Box(Synchronizable):
def __init__(self):
self.value = None
@synchronized
def get(self):
return self.value
@synchronized
def set(self, value):
self.value = value
box = Box()
box.set('foobar')
with synchronized(box):
box.value = 'taz\'dingo'
print(box.get())
```
# Arguments
obj (Synchronizable, function): The object to synchronize access to, or a
function to decorate.
# Returns
1. The decorated function.
2. The value of `obj.synchronizable_condition`, which should implement the
context-manager interface (to be used in a `with`-statement).
### Response:
def synchronized(obj):
"""
This function has two purposes:
1. Decorate a function that automatically synchronizes access to the object
passed as the first argument (usually `self`, for member methods)
2. Synchronize access to the object, used in a `with`-statement.
Note that you can use #wait(), #notify() and #notify_all() only on
synchronized objects.
# Example
```python
class Box(Synchronizable):
def __init__(self):
self.value = None
@synchronized
def get(self):
return self.value
@synchronized
def set(self, value):
self.value = value
box = Box()
box.set('foobar')
with synchronized(box):
box.value = 'taz\'dingo'
print(box.get())
```
# Arguments
obj (Synchronizable, function): The object to synchronize access to, or a
function to decorate.
# Returns
1. The decorated function.
2. The value of `obj.synchronizable_condition`, which should implement the
context-manager interface (to be used in a `with`-statement).
"""
if hasattr(obj, 'synchronizable_condition'):
return obj.synchronizable_condition
elif callable(obj):
@functools.wraps(obj)
def wrapper(self, *args, **kwargs):
with self.synchronizable_condition:
return obj(self, *args, **kwargs)
return wrapper
else:
raise TypeError('expected Synchronizable instance or callable to decorate') |
def _hash_file(self, algo):
"""Get the hash of the given file
:param algo: The algorithm to use.
:type algo: str
:return: The hexdigest of the data.
:rtype: str
"""
        # We get the algorithm function.
hash_data = getattr(hashlib, algo)()
with open(self.path, "rb") as file:
            # We open and read the parsed path.
# We read the content.
content = file.read()
            # We pass the content to the hash algorithm.
hash_data.update(content)
# And we extract and return the hash.
return hash_data.hexdigest() | Get the hash of the given file
:param algo: The algorithm to use.
:type algo: str
:return: The hexdigest of the data.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get the hash of the given file
:param algo: The algorithm to use.
:type algo: str
:return: The hexdigest of the data.
:rtype: str
### Response:
def _hash_file(self, algo):
"""Get the hash of the given file
:param algo: The algorithm to use.
:type algo: str
:return: The hexdigest of the data.
:rtype: str
"""
        # We get the algorithm function.
hash_data = getattr(hashlib, algo)()
with open(self.path, "rb") as file:
            # We open and read the parsed path.
# We read the content.
content = file.read()
            # We pass the content to the hash algorithm.
hash_data.update(content)
# And we extract and return the hash.
return hash_data.hexdigest() |
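Because _hash_file() reads the whole file into memory, a chunked variant can be preferable for large files; the standalone sketch below is an assumed alternative (its name and chunk size are illustrative), not part of the original class.
```python
# Hedged standalone variant that hashes in fixed-size chunks instead of one read().
import hashlib

def hash_file_chunked(path, algo='sha512', chunk_size=1 << 20):
    hash_data = getattr(hashlib, algo)()
    with open(path, 'rb') as file:
        for chunk in iter(lambda: file.read(chunk_size), b''):
            hash_data.update(chunk)
    return hash_data.hexdigest()
```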
def build(path, query=None, fragment=''):
"""
Generates a URL based on the inputted path and given query options and
fragment. The query should be a dictionary of terms that will be
generated into the URL, while the fragment is the anchor point within the
target path that will be navigated to. If there are any wildcards within
the path that are found within the query, they will be inserted into the
path itself and removed from the query string.
:example |>>> import skyline.gui
|>>> skyline.gui.build_url('sky://projects/%(project)s',
| {'project': 'Test', 'asset': 'Bob'})
|'sky://projects/Test/?asset=Bob'
:param path | <str>
query | <dict> || None
fragment | <str> || None
:return <str> | url
"""
url = nstr(path)
# replace the optional arguments in the url
keys = projex.text.findkeys(path)
if keys:
if query is None:
query = {}
opts = {}
for key in keys:
opts[key] = query.pop(key, '%({})s'.format(key))
url %= opts
# add the query
if query:
if type(query) is dict:
mapped_query = {}
for key, value in query.items():
mapped_query[nstr(key)] = nstr(value)
query_str = urllib.urlencode(mapped_query)
else:
query_str = nstr(query)
url += '?' + query_str
# include the fragment
if fragment:
url += '#' + fragment
return url | Generates a URL based on the inputted path and given query options and
fragment. The query should be a dictionary of terms that will be
generated into the URL, while the fragment is the anchor point within the
target path that will be navigated to. If there are any wildcards within
the path that are found within the query, they will be inserted into the
path itself and removed from the query string.
:example |>>> import skyline.gui
|>>> skyline.gui.build_url('sky://projects/%(project)s',
| {'project': 'Test', 'asset': 'Bob'})
|'sky://projects/Test/?asset=Bob'
:param path | <str>
query | <dict> || None
fragment | <str> || None
:return <str> | url | Below is the the instruction that describes the task:
### Input:
Generates a URL based on the inputted path and given query options and
fragment. The query should be a dictionary of terms that will be
generated into the URL, while the fragment is the anchor point within the
target path that will be navigated to. If there are any wildcards within
the path that are found within the query, they will be inserted into the
path itself and removed from the query string.
:example |>>> import skyline.gui
|>>> skyline.gui.build_url('sky://projects/%(project)s',
| {'project': 'Test', 'asset': 'Bob'})
|'sky://projects/Test/?asset=Bob'
:param path | <str>
query | <dict> || None
fragment | <str> || None
:return <str> | url
### Response:
def build(path, query=None, fragment=''):
"""
Generates a URL based on the inputted path and given query options and
fragment. The query should be a dictionary of terms that will be
generated into the URL, while the fragment is the anchor point within the
target path that will be navigated to. If there are any wildcards within
the path that are found within the query, they will be inserted into the
path itself and removed from the query string.
:example |>>> import skyline.gui
|>>> skyline.gui.build_url('sky://projects/%(project)s',
| {'project': 'Test', 'asset': 'Bob'})
|'sky://projects/Test/?asset=Bob'
:param path | <str>
query | <dict> || None
fragment | <str> || None
:return <str> | url
"""
url = nstr(path)
# replace the optional arguments in the url
keys = projex.text.findkeys(path)
if keys:
if query is None:
query = {}
opts = {}
for key in keys:
opts[key] = query.pop(key, '%({})s'.format(key))
url %= opts
# add the query
if query:
if type(query) is dict:
mapped_query = {}
for key, value in query.items():
mapped_query[nstr(key)] = nstr(value)
query_str = urllib.urlencode(mapped_query)
else:
query_str = nstr(query)
url += '?' + query_str
# include the fragment
if fragment:
url += '#' + fragment
return url |
def add_observer(self, o, component_type=ComponentType):
"""
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
"""
self.observers[component_type].add(o) | Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass | Below is the the instruction that describes the task:
### Input:
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
### Response:
def add_observer(self, o, component_type=ComponentType):
"""
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
"""
self.observers[component_type].add(o) |
def this_and_prev(iterable):
"""Walk an iterable, returning the current and previous items
as a two-tuple."""
try:
item = next(iterable)
while True:
next_item = next(iterable)
yield item, next_item
item = next_item
except StopIteration:
return | Walk an iterable, returning the current and previous items
as a two-tuple. | Below is the the instruction that describes the task:
### Input:
Walk an iterable, returning the current and previous items
as a two-tuple.
### Response:
def this_and_prev(iterable):
"""Walk an iterable, returning the current and previous items
as a two-tuple."""
try:
item = next(iterable)
while True:
next_item = next(iterable)
yield item, next_item
item = next_item
except StopIteration:
return |
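A quick check of the pairing behaviour; note that the generator calls next() on its argument directly, so it expects an iterator rather than a plain list, and each tuple pairs an item with the one that follows it.
```python
# Assumes this_and_prev() above is in scope; the argument must already be an iterator.
pairs = list(this_and_prev(iter([1, 2, 3, 4])))
print(pairs)  # [(1, 2), (2, 3), (3, 4)]
```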
def monitor(app):
"""Set up application monitoring."""
heroku_app = HerokuApp(dallinger_uid=app)
webbrowser.open(heroku_app.dashboard_url)
webbrowser.open("https://requester.mturk.com/mturk/manageHITs")
heroku_app.open_logs()
check_call(["open", heroku_app.db_uri])
while _keep_running():
summary = get_summary(app)
click.clear()
click.echo(header)
click.echo("\nExperiment {}\n".format(app))
click.echo(summary)
time.sleep(10) | Set up application monitoring. | Below is the the instruction that describes the task:
### Input:
Set up application monitoring.
### Response:
def monitor(app):
"""Set up application monitoring."""
heroku_app = HerokuApp(dallinger_uid=app)
webbrowser.open(heroku_app.dashboard_url)
webbrowser.open("https://requester.mturk.com/mturk/manageHITs")
heroku_app.open_logs()
check_call(["open", heroku_app.db_uri])
while _keep_running():
summary = get_summary(app)
click.clear()
click.echo(header)
click.echo("\nExperiment {}\n".format(app))
click.echo(summary)
time.sleep(10) |
def boundary_difference_exponential(graph, xxx_todo_changeme4):
r"""
Boundary term processing adjacent voxels difference value using an exponential relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
Finds all edges between all neighbours of the image and uses their difference in
intensity values as edge weight.
The weights are normalized using an exponential function and a smoothing factor
:math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its
ideal settings differ greatly from application to application.
The weights between two neighbouring voxels :math:`(p, q)` is then computed as
.. math::
w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}}
, for which :math:`w(p, q) \in (0, 1]` holds true.
When the created edge weights should be weighted according to the slice distance,
provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights
computed for the corresponding direction are divided by the respective slice
thickness. Set this parameter to `False` for equally weighted edges.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
original_image : ndarray
The original image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the original image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
original image.
"""
(original_image, sigma, spacing) = xxx_todo_changeme4
original_image = scipy.asarray(original_image)
def boundary_term_exponential(intensities):
"""
        Implementation of an exponential boundary term computation over an array.
"""
# apply exp-(x**2/sigma**2)
intensities = scipy.power(intensities, 2)
intensities /= math.pow(sigma, 2)
intensities *= -1
intensities = scipy.exp(intensities)
intensities[intensities <= 0] = sys.float_info.min
return intensities
__skeleton_difference(graph, original_image, boundary_term_exponential, spacing) | r"""
Boundary term processing adjacent voxels difference value using an exponential relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
Finds all edges between all neighbours of the image and uses their difference in
intensity values as edge weight.
The weights are normalized using an exponential function and a smoothing factor
:math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its
ideal settings differ greatly from application to application.
The weights between two neighbouring voxels :math:`(p, q)` is then computed as
.. math::
w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}}
, for which :math:`w(p, q) \in (0, 1]` holds true.
When the created edge weights should be weighted according to the slice distance,
provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights
computed for the corresponding direction are divided by the respective slice
thickness. Set this parameter to `False` for equally weighted edges.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
original_image : ndarray
The original image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the original image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
original image. | Below is the the instruction that describes the task:
### Input:
r"""
Boundary term processing adjacent voxels difference value using an exponential relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
Finds all edges between all neighbours of the image and uses their difference in
intensity values as edge weight.
The weights are normalized using an exponential function and a smoothing factor
:math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its
ideal settings differ greatly from application to application.
The weights between two neighbouring voxels :math:`(p, q)` is then computed as
.. math::
w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}}
, for which :math:`w(p, q) \in (0, 1]` holds true.
When the created edge weights should be weighted according to the slice distance,
provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights
computed for the corresponding direction are divided by the respective slice
thickness. Set this parameter to `False` for equally weighted edges.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
original_image : ndarray
The original image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the original image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
original image.
### Response:
def boundary_difference_exponential(graph, xxx_todo_changeme4):
r"""
Boundary term processing adjacent voxels difference value using an exponential relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
Finds all edges between all neighbours of the image and uses their difference in
intensity values as edge weight.
The weights are normalized using an exponential function and a smoothing factor
:math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its
ideal settings differ greatly from application to application.
The weights between two neighbouring voxels :math:`(p, q)` is then computed as
.. math::
w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}}
, for which :math:`w(p, q) \in (0, 1]` holds true.
When the created edge weights should be weighted according to the slice distance,
provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights
computed for the corresponding direction are divided by the respective slice
thickness. Set this parameter to `False` for equally weighted edges.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
original_image : ndarray
The original image.
sigma : float
The sigma parameter to use in the boundary term.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the original image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
original image.
"""
(original_image, sigma, spacing) = xxx_todo_changeme4
original_image = scipy.asarray(original_image)
def boundary_term_exponential(intensities):
"""
        Implementation of an exponential boundary term computation over an array.
"""
# apply exp-(x**2/sigma**2)
intensities = scipy.power(intensities, 2)
intensities /= math.pow(sigma, 2)
intensities *= -1
intensities = scipy.exp(intensities)
intensities[intensities <= 0] = sys.float_info.min
return intensities
__skeleton_difference(graph, original_image, boundary_term_exponential, spacing) |
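The graph construction itself needs the medpy GCGraph machinery, but the weighting can be inspected in isolation; the snippet below simply evaluates w = exp(-d**2 / sigma**2) for a few intensity differences and an assumed sigma.
```python
# Isolated look at the exponential weighting only (not the graph construction).
import numpy
diffs = numpy.array([0.0, 0.1, 0.5, 1.0])   # |I_p - I_q| for a few voxel pairs
sigma = 0.5
weights = numpy.exp(-(diffs ** 2) / sigma ** 2)
print(weights.round(4))  # [1.     0.9608 0.3679 0.0183]
```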
def get_module(self, module_name):
"""Return loaded module from the given name."""
try:
return self.module[module_name]
except KeyError:
return sys.modules[module_name] | Return loaded module from the given name. | Below is the the instruction that describes the task:
### Input:
Return loaded module from the given name.
### Response:
def get_module(self, module_name):
"""Return loaded module from the given name."""
try:
return self.module[module_name]
except KeyError:
return sys.modules[module_name] |
def _init_metadata(self):
"""stub"""
self._start_timestamp_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'start_timestamp'),
'element_label': 'start timestamp',
'instructions': 'enter an integer number of seconds for the start time',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'syntax': 'INTEGER',
'minimum_integer': 0,
'maximum_integer': None,
'integer_set': [],
'default_integer_values': [0]
}
self._end_timestamp_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'end_timestamp'),
'element_label': 'end timestamp',
'instructions': 'enter an integer number of seconds for the end time',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'syntax': 'INTEGER',
'minimum_integer': 0,
'maximum_integer': None,
'integer_set': [],
'default_integer_values': [0]
} | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def _init_metadata(self):
"""stub"""
self._start_timestamp_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'start_timestamp'),
'element_label': 'start timestamp',
'instructions': 'enter an integer number of seconds for the start time',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'syntax': 'INTEGER',
'minimum_integer': 0,
'maximum_integer': None,
'integer_set': [],
'default_integer_values': [0]
}
self._end_timestamp_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'end_timestamp'),
'element_label': 'end timestamp',
'instructions': 'enter an integer number of seconds for the end time',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'syntax': 'INTEGER',
'minimum_integer': 0,
'maximum_integer': None,
'integer_set': [],
'default_integer_values': [0]
} |
def poisson(lam=1, shape=_Null, dtype=_Null, **kwargs):
"""Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or Symbol, optional
Expectation of interval, should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
"""
return _random_helper(_internal._random_poisson, _internal._sample_poisson,
[lam], shape, dtype, kwargs) | Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or Symbol, optional
Expectation of interval, should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`. | Below is the the instruction that describes the task:
### Input:
Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or Symbol, optional
Expectation of interval, should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
is an Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
### Response:
def poisson(lam=1, shape=_Null, dtype=_Null, **kwargs):
"""Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or Symbol, optional
Expectation of interval, should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
is an Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
"""
return _random_helper(_internal._random_poisson, _internal._sample_poisson,
[lam], shape, dtype, kwargs) |
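A hedged usage sketch, assuming this is the mxnet.symbol.random.poisson front end suggested by the internal helpers and that the mxnet package is installed.
```python
# Usage sketch (mxnet assumed installed; output shapes follow the docstring).
import mxnet as mx

pois = mx.sym.random.poisson(lam=4, shape=(2, 2))       # scalar lam -> output shape (2, 2)
lam = mx.sym.Variable('lam')                            # symbol later bound to shape (x, y)
pois_per_lam = mx.sym.random.poisson(lam=lam, shape=2)  # -> shape (x, y, 2)
```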
def _zoom_rows(self, zoom):
"""Zooms grid rows"""
self.grid.SetDefaultRowSize(self.grid.std_row_size * zoom,
resizeExistingRows=True)
self.grid.SetRowLabelSize(self.grid.row_label_size * zoom)
for row, tab in self.code_array.row_heights:
if tab == self.grid.current_table and \
row < self.grid.code_array.shape[0]:
base_row_width = self.code_array.row_heights[(row, tab)]
if base_row_width is None:
base_row_width = self.grid.GetDefaultRowSize()
zoomed_row_size = base_row_width * zoom
self.grid.SetRowSize(row, zoomed_row_size) | Zooms grid rows | Below is the the instruction that describes the task:
### Input:
Zooms grid rows
### Response:
def _zoom_rows(self, zoom):
"""Zooms grid rows"""
self.grid.SetDefaultRowSize(self.grid.std_row_size * zoom,
resizeExistingRows=True)
self.grid.SetRowLabelSize(self.grid.row_label_size * zoom)
for row, tab in self.code_array.row_heights:
if tab == self.grid.current_table and \
row < self.grid.code_array.shape[0]:
base_row_width = self.code_array.row_heights[(row, tab)]
if base_row_width is None:
base_row_width = self.grid.GetDefaultRowSize()
zoomed_row_size = base_row_width * zoom
self.grid.SetRowSize(row, zoomed_row_size) |
def awd_lstm_lm_split(model:nn.Module) -> List[nn.Module]:
"Split a RNN `model` in groups for differential learning rates."
groups = [[rnn, dp] for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
return groups + [[model[0].encoder, model[0].encoder_dp, model[1]]] | Split a RNN `model` in groups for differential learning rates. | Below is the the instruction that describes the task:
### Input:
Split a RNN `model` in groups for differential learning rates.
### Response:
def awd_lstm_lm_split(model:nn.Module) -> List[nn.Module]:
"Split a RNN `model` in groups for differential learning rates."
groups = [[rnn, dp] for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
return groups + [[model[0].encoder, model[0].encoder_dp, model[1]]] |
def create_vmss(access_token, subscription_id, resource_group, vmss_name, vm_size, capacity,
publisher, offer, sku, version, subnet_id, location, be_pool_id=None,
lb_pool_id=None, storage_type='Standard_LRS', username='azure', password=None,
public_key=None, overprovision=True, upgrade_policy='Manual',
public_ip_per_vm=False):
'''Create virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the new scale set.
vm_size (str): Size of virtual machine, e.g. 'Standard_D1_v2'.
capacity (int): Number of VMs in the scale set. 0-1000.
publisher (str): VM image publisher. E.g. 'MicrosoftWindowsServer'.
offer (str): VM image offer. E.g. 'WindowsServer'.
sku (str): VM image sku. E.g. '2016-Datacenter'.
version (str): VM image version. E.g. 'latest'.
subnet_id (str): Resource id of a subnet.
location (str): Azure data center location. E.g. westus.
be_pool_id (str): Resource id of a backend NAT pool.
lb_pool_id (str): Resource id of a load balancer pool.
storage_type (str): Optional storage type. Default 'Standard_LRS'.
username (str): Optional user name. Default is 'azure'.
password (str): Optional password. Default is None (not required if using public_key).
public_key (str): Optional public key. Default is None (not required if using password,
e.g. on Windows).
overprovision (bool): Optional. Enable overprovisioning of VMs. Default True.
upgrade_policy (str): Optional. Set upgrade policy to Automatic, Manual or Rolling.
Default 'Manual'.
public_ip_per_vm (bool): Optional. Set public IP per VM. Default False.
Returns:
HTTP response. JSON body of the virtual machine scale set properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
vmss_body = {'location': location}
vmss_sku = {'name': vm_size, 'tier': 'Standard', 'capacity': capacity}
vmss_body['sku'] = vmss_sku
properties = {'overprovision': overprovision}
properties['upgradePolicy'] = {'mode': upgrade_policy}
os_profile = {'computerNamePrefix': vmss_name}
os_profile['adminUsername'] = username
if password is not None:
os_profile['adminPassword'] = password
if public_key is not None:
if password is None:
disable_pswd = True
else:
disable_pswd = False
linux_config = {'disablePasswordAuthentication': disable_pswd}
pub_key = {'path': '/home/' + username + '/.ssh/authorized_keys'}
pub_key['keyData'] = public_key
linux_config['ssh'] = {'publicKeys': [pub_key]}
os_profile['linuxConfiguration'] = linux_config
vm_profile = {'osProfile': os_profile}
os_disk = {'createOption': 'fromImage'}
os_disk['managedDisk'] = {'storageAccountType': storage_type}
os_disk['caching'] = 'ReadWrite'
storage_profile = {'osDisk': os_disk}
storage_profile['imageReference'] = \
{'publisher': publisher, 'offer': offer, 'sku': sku, 'version': version}
vm_profile['storageProfile'] = storage_profile
nic = {'name': vmss_name}
ip_config = {'name': vmss_name}
ip_properties = {'subnet': {'id': subnet_id}}
if be_pool_id is not None:
ip_properties['loadBalancerBackendAddressPools'] = [{'id': be_pool_id}]
if lb_pool_id is not None:
ip_properties['loadBalancerInboundNatPools'] = [{'id': lb_pool_id}]
if public_ip_per_vm is True:
ip_properties['publicIpAddressConfiguration'] = {
'name': 'pubip', 'properties': {'idleTimeoutInMinutes': 15}}
ip_config['properties'] = ip_properties
nic['properties'] = {'primary': True, 'ipConfigurations': [ip_config]}
network_profile = {'networkInterfaceConfigurations': [nic]}
vm_profile['networkProfile'] = network_profile
properties['virtualMachineProfile'] = vm_profile
vmss_body['properties'] = properties
body = json.dumps(vmss_body)
return do_put(endpoint, body, access_token) | Create virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the new scale set.
vm_size (str): Size of virtual machine, e.g. 'Standard_D1_v2'.
capacity (int): Number of VMs in the scale set. 0-1000.
publisher (str): VM image publisher. E.g. 'MicrosoftWindowsServer'.
offer (str): VM image offer. E.g. 'WindowsServer'.
sku (str): VM image sku. E.g. '2016-Datacenter'.
version (str): VM image version. E.g. 'latest'.
subnet_id (str): Resource id of a subnet.
location (str): Azure data center location. E.g. westus.
be_pool_id (str): Resource id of a backend NAT pool.
lb_pool_id (str): Resource id of a load balancer pool.
storage_type (str): Optional storage type. Default 'Standard_LRS'.
username (str): Optional user name. Default is 'azure'.
password (str): Optional password. Default is None (not required if using public_key).
public_key (str): Optional public key. Default is None (not required if using password,
e.g. on Windows).
overprovision (bool): Optional. Enable overprovisioning of VMs. Default True.
upgrade_policy (str): Optional. Set upgrade policy to Automatic, Manual or Rolling.
Default 'Manual'.
public_ip_per_vm (bool): Optional. Set public IP per VM. Default False.
Returns:
HTTP response. JSON body of the virtual machine scale set properties. | Below is the the instruction that describes the task:
### Input:
Create virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the new scale set.
vm_size (str): Size of virtual machine, e.g. 'Standard_D1_v2'.
capacity (int): Number of VMs in the scale set. 0-1000.
publisher (str): VM image publisher. E.g. 'MicrosoftWindowsServer'.
offer (str): VM image offer. E.g. 'WindowsServer'.
sku (str): VM image sku. E.g. '2016-Datacenter'.
version (str): VM image version. E.g. 'latest'.
subnet_id (str): Resource id of a subnet.
location (str): Azure data center location. E.g. westus.
be_pool_id (str): Resource id of a backend NAT pool.
lb_pool_id (str): Resource id of a load balancer pool.
storage_type (str): Optional storage type. Default 'Standard_LRS'.
username (str): Optional user name. Default is 'azure'.
password (str): Optional password. Default is None (not required if using public_key).
public_key (str): Optional public key. Default is None (not required if using password,
e.g. on Windows).
overprovision (bool): Optional. Enable overprovisioning of VMs. Default True.
upgrade_policy (str): Optional. Set upgrade policy to Automatic, Manual or Rolling.
Default 'Manual'.
public_ip_per_vm (bool): Optional. Set public IP per VM. Default False.
Returns:
HTTP response. JSON body of the virtual machine scale set properties.
### Response:
def create_vmss(access_token, subscription_id, resource_group, vmss_name, vm_size, capacity,
publisher, offer, sku, version, subnet_id, location, be_pool_id=None,
lb_pool_id=None, storage_type='Standard_LRS', username='azure', password=None,
public_key=None, overprovision=True, upgrade_policy='Manual',
public_ip_per_vm=False):
'''Create virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the new scale set.
vm_size (str): Size of virtual machine, e.g. 'Standard_D1_v2'.
capacity (int): Number of VMs in the scale set. 0-1000.
publisher (str): VM image publisher. E.g. 'MicrosoftWindowsServer'.
offer (str): VM image offer. E.g. 'WindowsServer'.
sku (str): VM image sku. E.g. '2016-Datacenter'.
version (str): VM image version. E.g. 'latest'.
subnet_id (str): Resource id of a subnet.
location (str): Azure data center location. E.g. westus.
be_pool_id (str): Resource id of a backend NAT pool.
lb_pool_id (str): Resource id of a load balancer pool.
storage_type (str): Optional storage type. Default 'Standard_LRS'.
username (str): Optional user name. Default is 'azure'.
password (str): Optional password. Default is None (not required if using public_key).
public_key (str): Optional public key. Default is None (not required if using password,
e.g. on Windows).
overprovision (bool): Optional. Enable overprovisioning of VMs. Default True.
upgrade_policy (str): Optional. Set upgrade policy to Automatic, Manual or Rolling.
Default 'Manual'.
public_ip_per_vm (bool): Optional. Set public IP per VM. Default False.
Returns:
HTTP response. JSON body of the virtual machine scale set properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
vmss_body = {'location': location}
vmss_sku = {'name': vm_size, 'tier': 'Standard', 'capacity': capacity}
vmss_body['sku'] = vmss_sku
properties = {'overprovision': overprovision}
properties['upgradePolicy'] = {'mode': upgrade_policy}
os_profile = {'computerNamePrefix': vmss_name}
os_profile['adminUsername'] = username
if password is not None:
os_profile['adminPassword'] = password
if public_key is not None:
if password is None:
disable_pswd = True
else:
disable_pswd = False
linux_config = {'disablePasswordAuthentication': disable_pswd}
pub_key = {'path': '/home/' + username + '/.ssh/authorized_keys'}
pub_key['keyData'] = public_key
linux_config['ssh'] = {'publicKeys': [pub_key]}
os_profile['linuxConfiguration'] = linux_config
vm_profile = {'osProfile': os_profile}
os_disk = {'createOption': 'fromImage'}
os_disk['managedDisk'] = {'storageAccountType': storage_type}
os_disk['caching'] = 'ReadWrite'
storage_profile = {'osDisk': os_disk}
storage_profile['imageReference'] = \
{'publisher': publisher, 'offer': offer, 'sku': sku, 'version': version}
vm_profile['storageProfile'] = storage_profile
nic = {'name': vmss_name}
ip_config = {'name': vmss_name}
ip_properties = {'subnet': {'id': subnet_id}}
if be_pool_id is not None:
ip_properties['loadBalancerBackendAddressPools'] = [{'id': be_pool_id}]
if lb_pool_id is not None:
ip_properties['loadBalancerInboundNatPools'] = [{'id': lb_pool_id}]
if public_ip_per_vm is True:
ip_properties['publicIpAddressConfiguration'] = {
'name': 'pubip', 'properties': {'idleTimeoutInMinutes': 15}}
ip_config['properties'] = ip_properties
nic['properties'] = {'primary': True, 'ipConfigurations': [ip_config]}
network_profile = {'networkInterfaceConfigurations': [nic]}
vm_profile['networkProfile'] = network_profile
properties['virtualMachineProfile'] = vm_profile
vmss_body['properties'] = properties
body = json.dumps(vmss_body)
return do_put(endpoint, body, access_token) |
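A hedged call sketch for the wrapper above: every identifier below (token, subscription id, resource group, subnet id, key path) is a placeholder, and the return value is assumed to be the raw HTTP response produced by do_put.
```python
# Hypothetical invocation: all names, ids and the token are placeholders.
import os

pub_key = open(os.path.expanduser('~/.ssh/id_rsa.pub')).read()
subnet_id = ('/subscriptions/<sub-id>/resourceGroups/myrg/providers/'
             'Microsoft.Network/virtualNetworks/myvnet/subnets/default')
response = create_vmss(access_token, '<sub-id>', 'myrg', 'myvmss',  # access_token from a prior auth call
                       'Standard_D1_v2', 3,
                       'Canonical', 'UbuntuServer', '16.04-LTS', 'latest',
                       subnet_id, 'westus',
                       username='azure', public_key=pub_key)
print(response.status_code)  # assuming do_put returns a requests-style response
```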
def _do_search(conf):
'''
Builds connection and search arguments, performs the LDAP search and
formats the results as a dictionary appropriate for pillar use.
'''
# Build LDAP connection args
connargs = {}
for name in ['server', 'port', 'tls', 'binddn', 'bindpw', 'anonymous']:
connargs[name] = _config(name, conf)
if connargs['binddn'] and connargs['bindpw']:
connargs['anonymous'] = False
# Build search args
try:
_filter = conf['filter']
except KeyError:
raise SaltInvocationError('missing filter')
_dn = _config('dn', conf)
scope = _config('scope', conf)
_lists = _config('lists', conf) or []
_attrs = _config('attrs', conf) or []
_dict_key_attr = _config('dict_key_attr', conf, 'dn')
attrs = _lists + _attrs + [_dict_key_attr]
if not attrs:
attrs = None
# Perform the search
try:
result = __salt__['ldap.search'](_filter, _dn, scope, attrs,
**connargs)['results']
except IndexError: # we got no results for this search
log.debug('LDAP search returned no results for filter %s', _filter)
result = {}
except Exception:
log.critical(
'Failed to retrieve pillar data from LDAP:\n', exc_info=True
)
return {}
return result | Builds connection and search arguments, performs the LDAP search and
formats the results as a dictionary appropriate for pillar use. | Below is the the instruction that describes the task:
### Input:
Builds connection and search arguments, performs the LDAP search and
formats the results as a dictionary appropriate for pillar use.
### Response:
def _do_search(conf):
'''
Builds connection and search arguments, performs the LDAP search and
formats the results as a dictionary appropriate for pillar use.
'''
# Build LDAP connection args
connargs = {}
for name in ['server', 'port', 'tls', 'binddn', 'bindpw', 'anonymous']:
connargs[name] = _config(name, conf)
if connargs['binddn'] and connargs['bindpw']:
connargs['anonymous'] = False
# Build search args
try:
_filter = conf['filter']
except KeyError:
raise SaltInvocationError('missing filter')
_dn = _config('dn', conf)
scope = _config('scope', conf)
_lists = _config('lists', conf) or []
_attrs = _config('attrs', conf) or []
_dict_key_attr = _config('dict_key_attr', conf, 'dn')
attrs = _lists + _attrs + [_dict_key_attr]
if not attrs:
attrs = None
# Perform the search
try:
result = __salt__['ldap.search'](_filter, _dn, scope, attrs,
**connargs)['results']
except IndexError: # we got no results for this search
log.debug('LDAP search returned no results for filter %s', _filter)
result = {}
except Exception:
log.critical(
'Failed to retrieve pillar data from LDAP:\n', exc_info=True
)
return {}
return result |
def copy_inner(self, scope):
"""Copy block contents (properties, inner blocks).
Renames inner block from current scope.
Used for mixins.
args:
scope (Scope): Current scope
returns:
list (block contents)
"""
if self.tokens[1]:
tokens = [u.copy() if u else u for u in self.tokens[1]]
out = [p for p in tokens if p]
utility.rename(out, scope, Block)
return out
return None | Copy block contents (properties, inner blocks).
Renames inner block from current scope.
Used for mixins.
args:
scope (Scope): Current scope
returns:
list (block contents) | Below is the the instruction that describes the task:
### Input:
Copy block contents (properties, inner blocks).
Renames inner block from current scope.
Used for mixins.
args:
scope (Scope): Current scope
returns:
list (block contents)
### Response:
def copy_inner(self, scope):
"""Copy block contents (properties, inner blocks).
Renames inner block from current scope.
Used for mixins.
args:
scope (Scope): Current scope
returns:
list (block contents)
"""
if self.tokens[1]:
tokens = [u.copy() if u else u for u in self.tokens[1]]
out = [p for p in tokens if p]
utility.rename(out, scope, Block)
return out
return None |
def save(self, filename=None, directory=None):
"""Save the DOT source to file. Ensure the file ends with a newline.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
Returns:
The (possibly relative) path of the saved source file.
"""
if filename is not None:
self.filename = filename
if directory is not None:
self.directory = directory
filepath = self.filepath
tools.mkdirs(filepath)
data = text_type(self.source)
with io.open(filepath, 'w', encoding=self.encoding) as fd:
fd.write(data)
if not data.endswith(u'\n'):
fd.write(u'\n')
return filepath | Save the DOT source to file. Ensure the file ends with a newline.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
Returns:
The (possibly relative) path of the saved source file. | Below is the the instruction that describes the task:
### Input:
Save the DOT source to file. Ensure the file ends with a newline.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
Returns:
The (possibly relative) path of the saved source file.
### Response:
def save(self, filename=None, directory=None):
"""Save the DOT source to file. Ensure the file ends with a newline.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
Returns:
The (possibly relative) path of the saved source file.
"""
if filename is not None:
self.filename = filename
if directory is not None:
self.directory = directory
filepath = self.filepath
tools.mkdirs(filepath)
data = text_type(self.source)
with io.open(filepath, 'w', encoding=self.encoding) as fd:
fd.write(data)
if not data.endswith(u'\n'):
fd.write(u'\n')
return filepath |
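Assuming this is the save method of graphviz's Graph/Digraph objects, a typical call looks like the sketch below (file and directory names are illustrative):
from graphviz import Digraph

g = Digraph('example', filename='example.gv')
g.edge('a', 'b')
path = g.save(directory='out')  # writes out/example.gv (ending with a newline) and returns the path
print(path)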
def newline(self):
"""Write eol, then start new line."""
self.write_str(self.eol)
self.room = self.maxlinelen | Write eol, then start new line. | Below is the the instruction that describes the task:
### Input:
Write eol, then start new line.
### Response:
def newline(self):
"""Write eol, then start new line."""
self.write_str(self.eol)
self.room = self.maxlinelen |
def script_to_html(script_file, html_file, args=None, unknown_args=None):
'''
Convert sos file to html format with syntax highlighting, and
either save the output to an HTML file or view it in a browser.
This converter accepts additional parameters --style for pygments
styles, --linenos for displaying line numbers, and a parameter --raw
to embed a URL to the raw sos file.
'''
from jinja2 import Environment, PackageLoader, select_autoescape
environment = Environment(
loader=PackageLoader('sos', 'templates'),
autoescape=select_autoescape(['html', 'xml']))
template = environment.get_template(
args.template if args and hasattr(args, 'template') and args.template
else 'sos_script.tpl')
with open(script_file) as script:
content = script.read()
# for backward compatibility
if args and hasattr(args, 'raw'):
args.url = args.raw
context = {
'filename': script_file,
'basename': os.path.basename(script_file),
'script': content,
'sos_version': __version__,
'linenos': args.linenos if args and hasattr(args, 'linenos') else True,
'url': args.url if args and hasattr(args, 'url') else '',
'theme': args.style if args and hasattr(args, 'style') else 'default',
}
html_content = template.render(context)
if html_file is None:
if args and args.view:
# write to a temp file
import tempfile
html_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.html').name
with open(html_file, 'w') as out:
out.write(html_content)
else:
sys.stdout.write(html_content)
else:
with open(html_file, 'w') as out:
out.write(html_content)
env.logger.info(f'SoS script saved to {html_file}')
#
if args and args.view:
import webbrowser
url = f'file://{os.path.abspath(html_file)}'
env.logger.info(f'Viewing {url} in a browser')
webbrowser.open(url, new=2)
# in case the html file is temporary, give the browser some time to load it
time.sleep(2) | Convert sos file to html format with syntax highlighting, and
either save the output to an HTML file or view it in a browser.
This converter accepts additional parameters --style for pygments
styles, --linenos for displaying line numbers, and a parameter --raw
to embed a URL to the raw sos file. | Below is the the instruction that describes the task:
### Input:
Convert sos file to html format with syntax highlighting, and
either save the output to an HTML file or view it in a browser.
This converter accepts additional parameters --style for pygments
styles, --linenos for displaying line numbers, and a parameter --raw
to embed a URL to the raw sos file.
### Response:
def script_to_html(script_file, html_file, args=None, unknown_args=None):
'''
Convert sos file to html format with syntax highlighting, and
either save the output to an HTML file or view it in a browser.
This converter accepts additional parameters --style for pygments
styles, --linenos for displaying line numbers, and a parameter --raw
to embed a URL to the raw sos file.
'''
from jinja2 import Environment, PackageLoader, select_autoescape
environment = Environment(
loader=PackageLoader('sos', 'templates'),
autoescape=select_autoescape(['html', 'xml']))
template = environment.get_template(
args.template if args and hasattr(args, 'template') and args.template
else 'sos_script.tpl')
with open(script_file) as script:
content = script.read()
# for backward compatibility
if args and hasattr(args, 'raw'):
args.url = args.raw
context = {
'filename': script_file,
'basename': os.path.basename(script_file),
'script': content,
'sos_version': __version__,
'linenos': args.linenos if args and hasattr(args, 'linenos') else True,
'url': args.url if args and hasattr(args, 'url') else '',
'theme': args.style if args and hasattr(args, 'style') else 'default',
}
html_content = template.render(context)
if html_file is None:
if args and args.view:
# write to a temp file
import tempfile
html_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.html').name
with open(html_file, 'w') as out:
out.write(html_content)
else:
sys.stdout.write(html_content)
else:
with open(html_file, 'w') as out:
out.write(html_content)
env.logger.info(f'SoS script saved to {html_file}')
#
if args and args.view:
import webbrowser
url = f'file://{os.path.abspath(html_file)}'
env.logger.info(f'Viewing {url} in a browser')
webbrowser.open(url, new=2)
# in case the html file is temporary, give the browser some time to load it
time.sleep(2) |
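A minimal invocation sketch for use outside the sos command line; the args namespace only needs the attributes read above, and the file names are made up:
from argparse import Namespace

args = Namespace(template='', linenos=True, url='', raw='', style='default', view=False)
script_to_html('analysis.sos', 'analysis.html', args=args)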
async def eventuallyAll(*coroFuncs: FlexFunc, # (use functools.partials if needed)
totalTimeout: float,
retryWait: float=0.1,
acceptableExceptions=None,
acceptableFails: int=0,
override_timeout_limit=False):
# TODO: Bug when `acceptableFails` > 0 if the first check fails, it will
# exhaust the entire timeout.
"""
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
"""
start = time.perf_counter()
def remaining():
return totalTimeout + start - time.perf_counter()
funcNames = []
others = 0
fails = 0
rem = None
for cf in coroFuncs:
if len(funcNames) < 2:
funcNames.append(get_func_name(cf))
else:
others += 1
# noinspection PyBroadException
try:
rem = remaining()
if rem <= 0:
break
await eventually(cf,
retryWait=retryWait,
timeout=rem,
acceptableExceptions=acceptableExceptions,
verbose=True,
override_timeout_limit=override_timeout_limit)
except Exception as ex:
if acceptableExceptions and type(ex) not in acceptableExceptions:
raise
fails += 1
logger.debug("a coro {} with args {} timed out without succeeding; fail count: "
"{}, acceptable: {}".
format(get_func_name(cf), get_func_args(cf), fails, acceptableFails))
if fails > acceptableFails:
raise
if rem is not None and rem <= 0:
fails += 1
if fails > acceptableFails:
err = 'All checks could not complete successfully since total timeout ' \
'expired {} sec ago'.format(-1 * rem if rem < 0 else 0)
raise EventuallyTimeoutException(err)
if others:
funcNames.append("and {} others".format(others))
desc = ", ".join(funcNames)
logger.debug("{} succeeded with {:.2f} seconds to spare".
format(desc, remaining())) | :param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return: | Below is the the instruction that describes the task:
### Input:
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
### Response:
async def eventuallyAll(*coroFuncs: FlexFunc, # (use functools.partials if needed)
totalTimeout: float,
retryWait: float=0.1,
acceptableExceptions=None,
acceptableFails: int=0,
override_timeout_limit=False):
# TODO: Bug when `acceptableFails` > 0 if the first check fails, it will
# exhaust the entire timeout.
"""
:param coroFuncs: iterable of no-arg functions
:param totalTimeout:
:param retryWait:
:param acceptableExceptions:
:param acceptableFails: how many of the passed in coroutines can
ultimately fail and still be ok
:return:
"""
start = time.perf_counter()
def remaining():
return totalTimeout + start - time.perf_counter()
funcNames = []
others = 0
fails = 0
rem = None
for cf in coroFuncs:
if len(funcNames) < 2:
funcNames.append(get_func_name(cf))
else:
others += 1
# noinspection PyBroadException
try:
rem = remaining()
if rem <= 0:
break
await eventually(cf,
retryWait=retryWait,
timeout=rem,
acceptableExceptions=acceptableExceptions,
verbose=True,
override_timeout_limit=override_timeout_limit)
except Exception as ex:
if acceptableExceptions and type(ex) not in acceptableExceptions:
raise
fails += 1
logger.debug("a coro {} with args {} timed out without succeeding; fail count: "
"{}, acceptable: {}".
format(get_func_name(cf), get_func_args(cf), fails, acceptableFails))
if fails > acceptableFails:
raise
if rem is not None and rem <= 0:
fails += 1
if fails > acceptableFails:
err = 'All checks could not complete successfully since total timeout ' \
'expired {} sec ago'.format(-1 * rem if rem < 0 else 0)
raise EventuallyTimeoutException(err)
if others:
funcNames.append("and {} others".format(others))
desc = ", ".join(funcNames)
logger.debug("{} succeeded with {:.2f} seconds to spare".
format(desc, remaining())) |
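A rough usage sketch, assuming the companion eventually helper retries plain no-arg async callables such as these:
import asyncio
import time

_started = time.perf_counter()

async def service_up():  # keeps failing for about a second, then passes
    assert time.perf_counter() - _started > 1.0

async def queue_drained():  # passes immediately
    assert True

async def main():
    # retry both checks until they pass or 5 seconds elapse in total
    await eventuallyAll(service_up, queue_drained, totalTimeout=5, retryWait=0.2)

asyncio.run(main())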
def editHook(self, repo_user, repo_name, hook_id, name, config,
events=None, add_events=None, remove_events=None, active=None):
"""
PATCH /repos/:owner/:repo/hooks/:id
:param hook_id: Id of the hook.
:param name: The name of the service that is being called.
:param config: A Hash containing key/value pairs to provide settings
for this hook.
"""
post = dict(
name=name,
config=config,
)
if events is not None:
post['events'] = events
if add_events is not None:
post['add_events'] = add_events
if remove_events is not None:
post['remove_events'] = remove_events
if active is not None:
post['active'] = active
return self.api.makeRequest(
['repos', repo_user, repo_name, 'hooks', str(hook_id)],
method='PATCH',
post=post,
) | PATCH /repos/:owner/:repo/hooks/:id
:param hook_id: Id of the hook.
:param name: The name of the service that is being called.
:param config: A Hash containing key/value pairs to provide settings
for this hook. | Below is the the instruction that describes the task:
### Input:
PATCH /repos/:owner/:repo/hooks/:id
:param hook_id: Id of the hook.
:param name: The name of the service that is being called.
:param config: A Hash containing key/value pairs to provide settings
for this hook.
### Response:
def editHook(self, repo_user, repo_name, hook_id, name, config,
events=None, add_events=None, remove_events=None, active=None):
"""
PATCH /repos/:owner/:repo/hooks/:id
:param hook_id: Id of the hook.
:param name: The name of the service that is being called.
:param config: A Hash containing key/value pairs to provide settings
for this hook.
"""
post = dict(
name=name,
config=config,
)
if events is not None:
post['events'] = events
if add_events is not None:
post['add_events'] = add_events
if remove_events is not None:
post['remove_events'] = remove_events
if active is not None:
post['active'] = active
return self.api.makeRequest(
['repos', repo_user, repo_name, 'hooks', str(hook_id)],
method='PATCH',
post=post,
) |
def _collection_html_response(resources, start=0, stop=20):
"""Return the HTML representation of the collection *resources*.
:param list resources: list of :class:`sandman.model.Model`s to render
:rtype: :class:`flask.Response`
"""
return make_response(render_template(
'collection.html',
resources=resources[start:stop])) | Return the HTML representation of the collection *resources*.
:param list resources: list of :class:`sandman.model.Model`s to render
:rtype: :class:`flask.Response` | Below is the the instruction that describes the task:
### Input:
Return the HTML representation of the collection *resources*.
:param list resources: list of :class:`sandman.model.Model`s to render
:rtype: :class:`flask.Response`
### Response:
def _collection_html_response(resources, start=0, stop=20):
"""Return the HTML representation of the collection *resources*.
:param list resources: list of :class:`sandman.model.Model`s to render
:rtype: :class:`flask.Response`
"""
return make_response(render_template(
'collection.html',
resources=resources[start:stop])) |
def ipv6_prefix_to_mask(prefix):
"""
ipv6 cidr prefix to net mask
:param prefix: cidr prefix, range [0, 128]
:type prefix: int
:return: colon separated ipv6 net mask,
eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
:rtype: str
"""
if prefix > 128 or prefix < 0:
raise ValueError("invalid cidr prefix for ipv6")
else:
mask = ((1 << 128) - 1) ^ ((1 << (128 - prefix)) - 1)
f = 15 # 0xf or 0b1111
hex_mask_str = ''
for i in range(0, 32):
hex_mask_str = format((mask & f), 'x') + hex_mask_str
mask = mask >> 4
if i != 31 and i & 3 == 3:
hex_mask_str = ':' + hex_mask_str
return hex_mask_str | ipv6 cidr prefix to net mask
:param prefix: cidr prefix, range [0, 128]
:type prefix: int
:return: colon separated ipv6 net mask,
eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
:rtype: str | Below is the the instruction that describes the task:
### Input:
ipv6 cidr prefix to net mask
:param prefix: cidr prefix, range [0, 128]
:type prefix: int
:return: colon separated ipv6 net mask,
eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
:rtype: str
### Response:
def ipv6_prefix_to_mask(prefix):
"""
ipv6 cidr prefix to net mask
:param prefix: cidr prefix, range [0, 128]
:type prefix: int
:return: colon separated ipv6 net mask,
eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
:rtype: str
"""
if prefix > 128 or prefix < 0:
raise ValueError("invalid cidr prefix for ipv6")
else:
mask = ((1 << 128) - 1) ^ ((1 << (128 - prefix)) - 1)
f = 15 # 0xf or 0b1111
hex_mask_str = ''
for i in range(0, 32):
hex_mask_str = format((mask & f), 'x') + hex_mask_str
mask = mask >> 4
if i != 31 and i & 3 == 3:
hex_mask_str = ':' + hex_mask_str
return hex_mask_str |
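Two example calls; the outputs follow directly from the bit arithmetic above:
>>> ipv6_prefix_to_mask(64)
'ffff:ffff:ffff:ffff:0000:0000:0000:0000'
>>> ipv6_prefix_to_mask(128)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'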
def configure(
self,
*,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
):
""" configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels as np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
"""
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1):
self._integer_interpolation = int(integer_interpolation) | configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels as np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation | Below is the the instruction that describes the task:
### Input:
configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels as np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
### Response:
def configure(
self,
*,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
):
""" configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels as np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
"""
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1):
self._integer_interpolation = int(integer_interpolation) |
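If this is asammdf's MDF.configure, a call might look like the sketch below (the file name is hypothetical):
from asammdf import MDF

mdf = MDF('recording.mf4')
mdf.configure(read_fragment_size=16 * 2 ** 20,  # 16 MB read chunks
              use_display_names=True,
              integer_interpolation=1)  # linear interpolation for integer channels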
def run_script(self, script_name, keys=None, args=None):
"""
Execute a walrus script with the given arguments.
:param script_name: The base name of the script to execute.
:param list keys: Keys referenced by the script.
:param list args: Arguments passed in to the script.
:returns: Return value of script.
.. note:: Redis scripts require two parameters, ``keys``
and ``args``, which are referenced in lua as ``KEYS``
and ``ARGV``.
"""
return self._scripts[script_name](keys, args) | Execute a walrus script with the given arguments.
:param script_name: The base name of the script to execute.
:param list keys: Keys referenced by the script.
:param list args: Arguments passed in to the script.
:returns: Return value of script.
.. note:: Redis scripts require two parameters, ``keys``
and ``args``, which are referenced in lua as ``KEYS``
and ``ARGV``. | Below is the the instruction that describes the task:
### Input:
Execute a walrus script with the given arguments.
:param script_name: The base name of the script to execute.
:param list keys: Keys referenced by the script.
:param list args: Arguments passed in to the script.
:returns: Return value of script.
.. note:: Redis scripts require two parameters, ``keys``
and ``args``, which are referenced in lua as ``KEYS``
and ``ARGV``.
### Response:
def run_script(self, script_name, keys=None, args=None):
"""
Execute a walrus script with the given arguments.
:param script_name: The base name of the script to execute.
:param list keys: Keys referenced by the script.
:param list args: Arguments passed in to the script.
:returns: Return value of script.
.. note:: Redis scripts require two parameters, ``keys``
and ``args``, which are referenced in lua as ``KEYS``
and ``ARGV``.
"""
return self._scripts[script_name](keys, args) |
def removeNotification(self, notificationId):
"""Destroy a notification, hiding it first if it currently shown to the user."""
fn = self.function_table.removeNotification
result = fn(notificationId)
return result | Destroy a notification, hiding it first if it is currently shown to the user. | Below is the the instruction that describes the task:
### Input:
Destroy a notification, hiding it first if it is currently shown to the user.
### Response:
def removeNotification(self, notificationId):
"""Destroy a notification, hiding it first if it currently shown to the user."""
fn = self.function_table.removeNotification
result = fn(notificationId)
return result |
def unix_time(dt=None, as_int=False):
"""Generate a unix style timestamp (in seconds)"""
if dt is None:
dt = datetime.datetime.utcnow()
if type(dt) is datetime.date:
dt = date_to_datetime(dt)
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
if as_int:
return int(delta.total_seconds())
return delta.total_seconds() | Generate a unix style timestamp (in seconds) | Below is the the instruction that describes the task:
### Input:
Generate a unix style timestamp (in seconds)
### Response:
def unix_time(dt=None, as_int=False):
"""Generate a unix style timestamp (in seconds)"""
if dt is None:
dt = datetime.datetime.utcnow()
if type(dt) is datetime.date:
dt = date_to_datetime(dt)
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
if as_int:
return int(delta.total_seconds())
return delta.total_seconds() |
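Two quick examples; the first value is exact because the argument is one day after the epoch:
import datetime

print(unix_time(datetime.datetime(1970, 1, 2)))  # 86400.0
print(unix_time(as_int=True))  # whole seconds for the current UTC time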
def user_names_like(cls, user_name, db_session=None):
"""
fetch users with similar names using LIKE clause
:param user_name:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model)
query = query.filter(
sa.func.lower(cls.model.user_name).like((user_name or "").lower())
)
query = query.order_by(cls.model.user_name)
# q = q.options(sa.orm.eagerload('groups'))
return query | fetch users with similar names using LIKE clause
:param user_name:
:param db_session:
:return: | Below is the the instruction that describes the task:
### Input:
fetch users with similar names using LIKE clause
:param user_name:
:param db_session:
:return:
### Response:
def user_names_like(cls, user_name, db_session=None):
"""
fetch users with similar names using LIKE clause
:param user_name:
:param db_session:
:return:
"""
db_session = get_db_session(db_session)
query = db_session.query(cls.model)
query = query.filter(
sa.func.lower(cls.model.user_name).like((user_name or "").lower())
)
query = query.order_by(cls.model.user_name)
# q = q.options(sa.orm.eagerload('groups'))
return query |
def match_tokens(ast_tokens, ast_types):
"""
Verify that each token in order does match the expected types.
The list provided by `get_tokens` does have two more elements
at the beginning of the list which should always be the same
for a condition (Module and Expr). Those are automatically
added first to the final list of expected types so you don't have
to specify it yourself each time.
>>> tokens = Condition.get_tokens('2 == 3')
>>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num])
True
Args:
ast_tokens (list): list of AST tokens parsed previously.
ast_types (list): list of expected AST types.
Returns:
bool: when all tokens match the expected types
"""
ast_final_types = [ast.Module, ast.Expr] + ast_types
return all(isinstance(ast_token, ast_type)
for ast_token, ast_type in zip(ast_tokens, ast_final_types)) | Verify that each token in order does match the expected types.
The list provided by `get_tokens` does have two more elements
at the beginning of the list which should always be the same
for a condition (Module and Expr). Those are automatically
added first to the final list of expected types so you don't have
to specify it yourself each time.
>>> tokens = Condition.get_tokens('2 == 3')
>>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num])
True
Args:
ast_tokens (list): list of AST tokens parsed previously.
ast_types (list): list of expected AST types.
Returns:
bool: when all tokens match the expected types | Below is the the instruction that describes the task:
### Input:
Verify that each token in order does match the expected types.
The list provided by `get_tokens` does have two more elements
at the beginning of the list which should always be the same
for a condition (Module and Expr). Those are automatically
added first to the final list of expected types so you don't have
to specify it yourself each time.
>>> tokens = Condition.get_tokens('2 == 3')
>>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num])
True
Args:
ast_tokens (list): list of AST tokens parsed previously.
ast_types (list): list of expected AST types.
Returns:
bool: when all tokens match the expected types
### Response:
def match_tokens(ast_tokens, ast_types):
"""
Verify that each token in order does match the expected types.
The list provided by `get_tokens` does have two more elements
at the beginning of the list which should always be the same
for a condition (Module and Expr). Those are automatically
added first to the final list of expected types so you don't have
to specify it yourself each time.
>>> tokens = Condition.get_tokens('2 == 3')
>>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num])
True
Args:
ast_tokens (list): list of AST tokens parsed previously.
ast_types (list): list of expected AST types.
Returns:
bool: when all tokens match the expected types
"""
ast_final_types = [ast.Module, ast.Expr] + ast_types
return all(isinstance(ast_token, ast_type)
for ast_token, ast_type in zip(ast_tokens, ast_final_types)) |
def get_data(name='sms-spam', nrows=None, limit=None):
""" Load data from a json, csv, or txt file if it exists in the data dir.
References:
[cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp)
[cities](http://download.geonames.org/export/dump/cities.zip)
[cities_us](http://download.geonames.org/export/dump/cities_us.zip)
>>> from nlpia.data.loaders import get_data
>>> words = get_data('words_ubuntu_us')
>>> len(words)
99171
>>> list(words[:8])
['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"]
>>> get_data('ubuntu_dialog_test').iloc[0]
Context i think we could import the old comments via r...
Utterance basically each xfree86 upload will NOT force u...
Name: 0, dtype: object
>>> get_data('imdb_test').info()
<class 'pandas.core.frame.DataFrame'>
MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9)
Data columns (total 3 columns):
url 20 non-null object
rating 20 non-null int64
text 20 non-null object
dtypes: int64(1), object(2)
memory usage: 809.0+ bytes
"""
nrows = nrows or limit
if name in BIG_URLS:
logger.info('Downloading {}'.format(name))
filepaths = download_unzip(name, normalize_filenames=True)
logger.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths))
filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name]
logger.debug('nlpia.loaders.get_data.filepath=' + str(filepath))
filepathlow = filepath.lower()
if len(BIG_URLS[name]) >= 4:
kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {}
return BIG_URLS[name][3](filepath, **kwargs)
if filepathlow.endswith('.w2v.txt'):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows)
except (TypeError, UnicodeError):
pass
if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows)
except (TypeError, UnicodeError):
pass
if filepathlow.endswith('.gz'):
try:
filepath = ensure_open(filepath)
except: # noqa
pass
if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow):
return read_json(filepath)
if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'):
try:
return pd.read_table(filepath)
except: # noqa
pass
if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'):
try:
return read_csv(filepath)
except: # noqa
pass
if filepathlow.endswith('.txt'):
try:
return read_txt(filepath)
except (TypeError, UnicodeError):
pass
return filepaths[name]
elif name in DATASET_NAME2FILENAME:
return read_named_csv(name, nrows=nrows)
elif name in DATA_NAMES:
return read_named_csv(DATA_NAMES[name], nrows=nrows)
elif os.path.isfile(name):
return read_named_csv(name, nrows=nrows)
elif os.path.isfile(os.path.join(DATA_PATH, name)):
return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows)
msg = 'Unable to find dataset "{}" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format(
name, DATA_PATH, BIGDATA_PATH)
msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES))
logger.error(msg)
raise IOError(msg) | Load data from a json, csv, or txt file if it exists in the data dir.
References:
[cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp)
[cities](http://download.geonames.org/export/dump/cities.zip)
[cities_us](http://download.geonames.org/export/dump/cities_us.zip)
>>> from nlpia.data.loaders import get_data
>>> words = get_data('words_ubuntu_us')
>>> len(words)
99171
>>> list(words[:8])
['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"]
>>> get_data('ubuntu_dialog_test').iloc[0]
Context i think we could import the old comments via r...
Utterance basically each xfree86 upload will NOT force u...
Name: 0, dtype: object
>>> get_data('imdb_test').info()
<class 'pandas.core.frame.DataFrame'>
MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9)
Data columns (total 3 columns):
url 20 non-null object
rating 20 non-null int64
text 20 non-null object
dtypes: int64(1), object(2)
memory usage: 809.0+ bytes | Below is the the instruction that describes the task:
### Input:
Load data from a json, csv, or txt file if it exists in the data dir.
References:
[cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp)
[cities](http://download.geonames.org/export/dump/cities.zip)
[cities_us](http://download.geonames.org/export/dump/cities_us.zip)
>>> from nlpia.data.loaders import get_data
>>> words = get_data('words_ubuntu_us')
>>> len(words)
99171
>>> list(words[:8])
['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"]
>>> get_data('ubuntu_dialog_test').iloc[0]
Context i think we could import the old comments via r...
Utterance basically each xfree86 upload will NOT force u...
Name: 0, dtype: object
>>> get_data('imdb_test').info()
<class 'pandas.core.frame.DataFrame'>
MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9)
Data columns (total 3 columns):
url 20 non-null object
rating 20 non-null int64
text 20 non-null object
dtypes: int64(1), object(2)
memory usage: 809.0+ bytes
### Response:
def get_data(name='sms-spam', nrows=None, limit=None):
""" Load data from a json, csv, or txt file if it exists in the data dir.
References:
[cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp)
[cities](http://download.geonames.org/export/dump/cities.zip)
[cities_us](http://download.geonames.org/export/dump/cities_us.zip)
>>> from nlpia.data.loaders import get_data
>>> words = get_data('words_ubuntu_us')
>>> len(words)
99171
>>> list(words[:8])
['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"]
>>> get_data('ubuntu_dialog_test').iloc[0]
Context i think we could import the old comments via r...
Utterance basically each xfree86 upload will NOT force u...
Name: 0, dtype: object
>>> get_data('imdb_test').info()
<class 'pandas.core.frame.DataFrame'>
MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9)
Data columns (total 3 columns):
url 20 non-null object
rating 20 non-null int64
text 20 non-null object
dtypes: int64(1), object(2)
memory usage: 809.0+ bytes
"""
nrows = nrows or limit
if name in BIG_URLS:
logger.info('Downloading {}'.format(name))
filepaths = download_unzip(name, normalize_filenames=True)
logger.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths))
filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name]
logger.debug('nlpia.loaders.get_data.filepath=' + str(filepath))
filepathlow = filepath.lower()
if len(BIG_URLS[name]) >= 4:
kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {}
return BIG_URLS[name][3](filepath, **kwargs)
if filepathlow.endswith('.w2v.txt'):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows)
except (TypeError, UnicodeError):
pass
if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows)
except (TypeError, UnicodeError):
pass
if filepathlow.endswith('.gz'):
try:
filepath = ensure_open(filepath)
except: # noqa
pass
if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow):
return read_json(filepath)
if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'):
try:
return pd.read_table(filepath)
except: # noqa
pass
if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'):
try:
return read_csv(filepath)
except: # noqa
pass
if filepathlow.endswith('.txt'):
try:
return read_txt(filepath)
except (TypeError, UnicodeError):
pass
return filepaths[name]
elif name in DATASET_NAME2FILENAME:
return read_named_csv(name, nrows=nrows)
elif name in DATA_NAMES:
return read_named_csv(DATA_NAMES[name], nrows=nrows)
elif os.path.isfile(name):
return read_named_csv(name, nrows=nrows)
elif os.path.isfile(os.path.join(DATA_PATH, name)):
return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows)
msg = 'Unable to find dataset "{}" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format(
name, DATA_PATH, BIGDATA_PATH)
msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES))
logger.error(msg)
raise IOError(msg) |
def get_objective_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveLookupSession) - an
``ObjectiveLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` is ``true``.*
"""
if not self.supports_objective_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveLookupSession(proxy=proxy, runtime=self._runtime) | Gets the ``OsidSession`` associated with the objective lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveLookupSession) - an
``ObjectiveLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the objective lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveLookupSession) - an
``ObjectiveLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` is ``true``.*
### Response:
def get_objective_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveLookupSession) - an
``ObjectiveLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` is ``true``.*
"""
if not self.supports_objective_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveLookupSession(proxy=proxy, runtime=self._runtime) |
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup = False):
"""
Format and print the output of ListAll
:param lines:
:param resources:
:param aws_config:
:param template:
:param arguments:
:param nodup:
:return:
"""
for line in lines:
output = []
for resource in resources:
current_path = resource.split('.')
outline = line[1]
for key in line[2]:
outline = outline.replace('_KEY_('+key+')', get_value_at(aws_config['services'], current_path, key, True))
output.append(outline)
output = '\n'.join(line for line in sorted(set(output)))
template = template.replace(line[0], output)
for (i, argument) in enumerate(arguments):
template = template.replace('_ARG_%d_' % i, argument)
return template | Format and print the output of ListAll
:param lines:
:param resources:
:param aws_config:
:param template:
:param arguments:
:param nodup:
:return: | Below is the the instruction that describes the task:
### Input:
Format and print the output of ListAll
:param lines:
:param resources:
:param aws_config:
:param template:
:param arguments:
:param nodup:
:return:
### Response:
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup = False):
"""
Format and print the output of ListAll
:param lines:
:param resources:
:param aws_config:
:param template:
:param arguments:
:param nodup:
:return:
"""
for line in lines:
output = []
for resource in resources:
current_path = resource.split('.')
outline = line[1]
for key in line[2]:
outline = outline.replace('_KEY_('+key+')', get_value_at(aws_config['services'], current_path, key, True))
output.append(outline)
output = '\n'.join(line for line in sorted(set(output)))
template = template.replace(line[0], output)
for (i, argument) in enumerate(arguments):
template = template.replace('_ARG_%d_' % i, argument)
return template |
def get_inflators(target_year):
'''
Function that computes the calibration ratios (bdf to cn for the data year) and the ageing ratios
from the national accounts aggregates and the bdf consumption aggregates.
'''
data_year = find_nearest_inferior(data_years, target_year)
inflators_bdf_to_cn = get_inflators_bdf_to_cn(data_year)
inflators_cn_to_cn = get_inflators_cn_to_cn(target_year)
ratio_by_variable = dict()
for key in inflators_cn_to_cn.keys():
ratio_by_variable[key] = inflators_bdf_to_cn[key] * inflators_cn_to_cn[key]
return ratio_by_variable | Function that computes the calibration ratios (bdf to cn for the data year) and the ageing ratios
from the national accounts aggregates and the bdf consumption aggregates. | Below is the the instruction that describes the task:
### Input:
Function that computes the calibration ratios (bdf to cn for the data year) and the ageing ratios
from the national accounts aggregates and the bdf consumption aggregates.
### Response:
def get_inflators(target_year):
'''
Function that computes the calibration ratios (bdf to cn for the data year) and the ageing ratios
from the national accounts aggregates and the bdf consumption aggregates.
'''
data_year = find_nearest_inferior(data_years, target_year)
inflators_bdf_to_cn = get_inflators_bdf_to_cn(data_year)
inflators_cn_to_cn = get_inflators_cn_to_cn(target_year)
ratio_by_variable = dict()
for key in inflators_cn_to_cn.keys():
ratio_by_variable[key] = inflators_bdf_to_cn[key] * inflators_cn_to_cn[key]
return ratio_by_variable |
def new_app(self, App, prefix=None, callable=None, **params):
"""Invoke this method in the :meth:`build` method as many times
as the number of :class:`Application` required by this
:class:`MultiApp`.
:param App: an :class:`Application` class.
:param prefix: The prefix to use for the application,
the prefix is appended to
the application :ref:`config parameters <settings>` and to the
application name. Each call to this method must use a different
value for this parameter. It can be ``None``.
:param callable: optional callable (function or object) used during
initialisation of *App* (the :class:`Application.callable`).
:param params: additional key-valued parameters used when creating
an instance of *App*.
:return: a tuple used by the :meth:`apps` method.
"""
params.update(self.cfg.params)
params.pop('name', None) # remove the name
prefix = prefix or ''
if not prefix and '' in self._apps:
prefix = App.name or App.__name__.lower()
if not prefix:
name = self.name
cfg = App.create_config(params, name=name)
else:
name = '%s_%s' % (prefix, self.name)
cfg = App.create_config(params, prefix=prefix, name=name)
# Add the config entry to the multi app config if not available
for k in cfg.settings:
if k not in self.cfg.settings:
self.cfg.settings[k] = cfg.settings[k]
return new_app(prefix, (App, name, callable, cfg)) | Invoke this method in the :meth:`build` method as many times
as the number of :class:`Application` required by this
:class:`MultiApp`.
:param App: an :class:`Application` class.
:param prefix: The prefix to use for the application,
the prefix is appended to
the application :ref:`config parameters <settings>` and to the
application name. Each call to this method must use a different
value for this parameter. It can be ``None``.
:param callable: optional callable (function or object) used during
initialisation of *App* (the :class:`Application.callable`).
:param params: additional key-valued parameters used when creating
an instance of *App*.
:return: a tuple used by the :meth:`apps` method. | Below is the the instruction that describes the task:
### Input:
Invoke this method in the :meth:`build` method as many times
as the number of :class:`Application` required by this
:class:`MultiApp`.
:param App: an :class:`Application` class.
:param prefix: The prefix to use for the application,
the prefix is appended to
the application :ref:`config parameters <settings>` and to the
application name. Each call to this method must use a different
value for this parameter. It can be ``None``.
:param callable: optional callable (function or object) used during
initialisation of *App* (the :class:`Application.callable`).
:param params: additional key-valued parameters used when creating
an instance of *App*.
:return: a tuple used by the :meth:`apps` method.
### Response:
def new_app(self, App, prefix=None, callable=None, **params):
"""Invoke this method in the :meth:`build` method as many times
as the number of :class:`Application` required by this
:class:`MultiApp`.
:param App: an :class:`Application` class.
:param prefix: The prefix to use for the application,
the prefix is appended to
the application :ref:`config parameters <settings>` and to the
application name. Each call to this method must use a different
value for this parameter. It can be ``None``.
:param callable: optional callable (function or object) used during
initialisation of *App* (the :class:`Application.callable`).
:param params: additional key-valued parameters used when creating
an instance of *App*.
:return: a tuple used by the :meth:`apps` method.
"""
params.update(self.cfg.params)
params.pop('name', None) # remove the name
prefix = prefix or ''
if not prefix and '' in self._apps:
prefix = App.name or App.__name__.lower()
if not prefix:
name = self.name
cfg = App.create_config(params, name=name)
else:
name = '%s_%s' % (prefix, self.name)
cfg = App.create_config(params, prefix=prefix, name=name)
# Add the config entry to the multi app config if not available
for k in cfg.settings:
if k not in self.cfg.settings:
self.cfg.settings[k] = cfg.settings[k]
return new_app(prefix, (App, name, callable, cfg)) |
def string_presenter(self, dumper, data):
"""Presenter to force yaml.dump to use multi-line string style."""
if '\n' in data:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_scalar('tag:yaml.org,2002:str', data) | Presenter to force yaml.dump to use multi-line string style. | Below is the the instruction that describes the task:
### Input:
Presenter to force yaml.dump to use multi-line string style.
### Response:
def string_presenter(self, dumper, data):
"""Presenter to force yaml.dump to use multi-line string style."""
if '\n' in data:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
else:
return dumper.represent_scalar('tag:yaml.org,2002:str', data) |
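The presenter only takes effect once it is registered with PyYAML; a standalone sketch of the same idea:
import yaml

def string_presenter(dumper, data):  # module-level version of the method above
    if '\n' in data:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)

yaml.add_representer(str, string_presenter)
print(yaml.dump({'msg': 'line one\nline two\n'}))
# msg: |
#   line one
#   line two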