Dataset schema (field name, dtype, value range):

    repo              stringlengths   7 - 55
    path              stringlengths   4 - 223
    url               stringlengths   87 - 315
    code              stringlengths   75 - 104k
    code_tokens       list
    docstring         stringlengths   1 - 46.9k
    docstring_tokens  list
    language          stringclasses   1 value
    partition         stringclasses   3 values
    avg_line_len      float64         7.91 - 980
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1358-L1375
def set_group(self, group):
    """Set group size of Dataset (used for ranking).

    Parameters
    ----------
    group : list, numpy 1-D array, pandas Series or None
        Group size of each group.

    Returns
    -------
    self : Dataset
        Dataset with set group.
    """
    self.group = group
    if self.handle is not None and group is not None:
        group = list_to_1d_numpy(group, np.int32, name='group')
        self.set_field('group', group)
    return self
[ "def", "set_group", "(", "self", ",", "group", ")", ":", "self", ".", "group", "=", "group", "if", "self", ".", "handle", "is", "not", "None", "and", "group", "is", "not", "None", ":", "group", "=", "list_to_1d_numpy", "(", "group", ",", "np", ".", "int32", ",", "name", "=", "'group'", ")", "self", ".", "set_field", "(", "'group'", ",", "group", ")", "return", "self" ]
Set group size of Dataset (used for ranking). Parameters ---------- group : list, numpy 1-D array, pandas Series or None Group size of each group. Returns ------- self : Dataset Dataset with set group.
[ "Set", "group", "size", "of", "Dataset", "(", "used", "for", "ranking", ")", "." ]
python
train
29.166667
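A minimal usage sketch for set_group above (the synthetic data and the two query sizes are made up for illustration; the group sizes must sum to the number of rows):

import numpy as np
import lightgbm as lgb

X = np.random.rand(6, 4)          # 6 documents, 4 features
y = np.array([0, 1, 0, 1, 1, 0])  # relevance labels
ds = lgb.Dataset(X, label=y)
ds.set_group([3, 3])              # two queries of 3 documents each; sizes sum to len(X)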
berkerpeksag/astor
astor/source_repr.py
https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/source_repr.py#L177-L210
def delimiter_groups(line, begin_delim=begin_delim, end_delim=end_delim):
    """Split a line into alternating groups.
    The first group cannot have a line feed inserted,
    the next one can, etc.
    """
    text = []
    line = iter(line)
    while True:
        # First build and yield an unsplittable group
        for item in line:
            text.append(item)
            if item in begin_delim:
                break
        if not text:
            break
        yield text

        # Now build and yield a splittable group
        level = 0
        text = []
        for item in line:
            if item in begin_delim:
                level += 1
            elif item in end_delim:
                level -= 1
                if level < 0:
                    yield text
                    text = [item]
                    break
            text.append(item)
        else:
            assert not text, text
            break
[ "def", "delimiter_groups", "(", "line", ",", "begin_delim", "=", "begin_delim", ",", "end_delim", "=", "end_delim", ")", ":", "text", "=", "[", "]", "line", "=", "iter", "(", "line", ")", "while", "True", ":", "# First build and yield an unsplittable group", "for", "item", "in", "line", ":", "text", ".", "append", "(", "item", ")", "if", "item", "in", "begin_delim", ":", "break", "if", "not", "text", ":", "break", "yield", "text", "# Now build and yield a splittable group", "level", "=", "0", "text", "=", "[", "]", "for", "item", "in", "line", ":", "if", "item", "in", "begin_delim", ":", "level", "+=", "1", "elif", "item", "in", "end_delim", ":", "level", "-=", "1", "if", "level", "<", "0", ":", "yield", "text", "text", "=", "[", "item", "]", "break", "text", ".", "append", "(", "item", ")", "else", ":", "assert", "not", "text", ",", "text", "break" ]
Split a line into alternating groups. The first group cannot have a line feed inserted, the next one can, etc.
[ "Split", "a", "line", "into", "alternating", "groups", ".", "The", "first", "group", "cannot", "have", "a", "line", "feed", "inserted", "the", "next", "one", "can", "etc", "." ]
python
train
27.470588
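To make the alternating unsplittable/splittable behavior concrete, here is a hedged walk-through; astor's module-level begin_delim/end_delim defaults are not shown above, so explicit delimiter sets are passed in:

from astor.source_repr import delimiter_groups

line = ['f', '(', 'a', ',', ' ', 'b', ')']
for group in delimiter_groups(line, begin_delim={'('}, end_delim={')'}):
    print(group)
# ['f', '(']            -> unsplittable: no line feed may be inserted here
# ['a', ',', ' ', 'b']  -> splittable: the contents inside the parentheses
# [')']                 -> trailing unsplittable group holding the closer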
google/flatbuffers
python/flatbuffers/builder.py
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L156-L164
def StartObject(self, numfields):
    """StartObject initializes bookkeeping for writing a new object."""
    self.assertNotNested()

    # use 32-bit offsets so that arithmetic doesn't overflow.
    self.current_vtable = [0 for _ in range_func(numfields)]
    self.objectEnd = self.Offset()
    self.nested = True
[ "def", "StartObject", "(", "self", ",", "numfields", ")", ":", "self", ".", "assertNotNested", "(", ")", "# use 32-bit offsets so that arithmetic doesn't overflow.", "self", ".", "current_vtable", "=", "[", "0", "for", "_", "in", "range_func", "(", "numfields", ")", "]", "self", ".", "objectEnd", "=", "self", ".", "Offset", "(", ")", "self", ".", "nested", "=", "True" ]
StartObject initializes bookkeeping for writing a new object.
[ "StartObject", "initializes", "bookkeeping", "for", "writing", "a", "new", "object", "." ]
python
train
36.777778
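A hedged builder sketch showing where StartObject fits in the write sequence (field slot 0 and the value 42 are arbitrary):

import flatbuffers

builder = flatbuffers.Builder(0)
builder.StartObject(2)              # begin bookkeeping for an object with 2 fields
builder.PrependInt32Slot(0, 42, 0)  # write field slot 0 (default value 0)
obj = builder.EndObject()           # returns the offset of the finished object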
brocade/pynos
pynos/versions/base/yang/ietf_netconf.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/ietf_netconf.py#L313-L323
def copy_config_input_with_inactive(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    copy_config = ET.Element("copy_config")
    config = copy_config
    input = ET.SubElement(copy_config, "input")
    with_inactive = ET.SubElement(input, "with-inactive",
                                  xmlns="http://tail-f.com/ns/netconf/inactive/1.0")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "copy_config_input_with_inactive", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "copy_config", "=", "ET", ".", "Element", "(", "\"copy_config\"", ")", "config", "=", "copy_config", "input", "=", "ET", ".", "SubElement", "(", "copy_config", ",", "\"input\"", ")", "with_inactive", "=", "ET", ".", "SubElement", "(", "input", ",", "\"with-inactive\"", ",", "xmlns", "=", "\"http://tail-f.com/ns/netconf/inactive/1.0\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
41.454545
saltstack/salt
salt/modules/boto3_route53.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_route53.py#L287-L412
def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerReference=None,
                       Comment='', PrivateZone=False, DelegationSetId=None,
                       region=None, key=None, keyid=None, profile=None):
    '''
    Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
    newly created Hosted Zone.

    Name
        The name of the domain. This should be a fully-specified domain, and should terminate
        with a period. This is the name you have registered with your DNS registrar. It is also
        the name you will delegate from your registrar to the Amazon Route 53 delegation servers
        returned in response to this request.

    VPCId
        When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required. Exclusive with VPCName. Ignored if passed for a non-private zone.

    VPCName
        When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required. Exclusive with VPCId. Ignored if passed for a non-private zone.

    VPCRegion
        When creating a private hosted zone, the region of the associated VPC is required. If
        not provided, an effort will be made to determine it from VPCId or VPCName, if possible.
        If this fails, you'll need to provide an explicit value for this option. Ignored if
        passed for a non-private zone.

    CallerReference
        A unique string that identifies the request and that allows create_hosted_zone() calls
        to be retried without the risk of executing the operation twice. This is a required
        parameter when creating new Hosted Zones. Maximum length of 128.

    Comment
        Any comments you want to include about the hosted zone.

    PrivateZone
        Boolean - Set to True if creating a private hosted zone.

    DelegationSetId
        If you want to associate a reusable delegation set with this hosted zone, the ID that
        Amazon Route 53 assigned to the reusable delegation set when you created it. Note that
        XXX TODO create_delegation_set() is not yet implemented, so you'd need to manually
        create any delegation sets before utilizing this.

    region
        Region endpoint to connect to.

    key
        AWS key to bind with.

    keyid
        AWS keyid to bind with.

    profile
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.

    CLI Example::

        salt myminion boto3_route53.create_hosted_zone example.org.
    '''
    if not Name.endswith('.'):
        raise SaltInvocationError('Domain must be fully-qualified, complete with trailing period.')
    Name = aws_encode(Name)
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone,
                             region=region, key=key, keyid=keyid, profile=profile)
    if deets:
        log.info(
            'Route 53 hosted zone %s already exists. You may want to pass '
            'e.g. \'PrivateZone=True\' or similar...', Name
        )
        return None
    args = {
        'Name': Name,
        'CallerReference': CallerReference,
        'HostedZoneConfig': {
            'Comment': Comment,
            'PrivateZone': PrivateZone
        }
    }
    args.update({'DelegationSetId': DelegationSetId}) if DelegationSetId else None
    if PrivateZone:
        if not _exactly_one((VPCName, VPCId)):
            raise SaltInvocationError('Either VPCName or VPCId is required when creating a '
                                      'private zone.')
        vpcs = __salt__['boto_vpc.describe_vpcs'](
            vpc_id=VPCId, name=VPCName, region=region, key=key,
            keyid=keyid, profile=profile).get('vpcs', [])
        if VPCRegion and vpcs:
            vpcs = [v for v in vpcs if v['region'] == VPCRegion]
        if not vpcs:
            log.error('Private zone requested but no VPC matching given criteria found.')
            return None
        if len(vpcs) > 1:
            log.error(
                'Private zone requested but multiple VPCs matching given '
                'criteria found: %s.', [v['id'] for v in vpcs]
            )
            return None
        vpc = vpcs[0]
        if VPCName:
            VPCId = vpc['id']
        if not VPCRegion:
            VPCRegion = vpc['region']
        args.update({'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}})
    else:
        if any((VPCId, VPCName, VPCRegion)):
            log.info('Options VPCId, VPCName, and VPCRegion are ignored when creating '
                     'non-private zones.')
    tries = 10
    while tries:
        try:
            r = conn.create_hosted_zone(**args)
            r.pop('ResponseMetadata', None)
            if _wait_for_sync(r['ChangeInfo']['Id'], conn):
                return [r]
            return []
        except ClientError as e:
            if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                log.debug('Throttled by AWS API.')
                time.sleep(3)
                tries -= 1
                continue
            log.error('Failed to create hosted zone %s: %s', Name, e)
            return []
    return []
[ "def", "create_hosted_zone", "(", "Name", ",", "VPCId", "=", "None", ",", "VPCName", "=", "None", ",", "VPCRegion", "=", "None", ",", "CallerReference", "=", "None", ",", "Comment", "=", "''", ",", "PrivateZone", "=", "False", ",", "DelegationSetId", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "not", "Name", ".", "endswith", "(", "'.'", ")", ":", "raise", "SaltInvocationError", "(", "'Domain must be fully-qualified, complete with trailing period.'", ")", "Name", "=", "aws_encode", "(", "Name", ")", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "deets", "=", "find_hosted_zone", "(", "Name", "=", "Name", ",", "PrivateZone", "=", "PrivateZone", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "deets", ":", "log", ".", "info", "(", "'Route 53 hosted zone %s already exists. You may want to pass '", "'e.g. \\'PrivateZone=True\\' or similar...'", ",", "Name", ")", "return", "None", "args", "=", "{", "'Name'", ":", "Name", ",", "'CallerReference'", ":", "CallerReference", ",", "'HostedZoneConfig'", ":", "{", "'Comment'", ":", "Comment", ",", "'PrivateZone'", ":", "PrivateZone", "}", "}", "args", ".", "update", "(", "{", "'DelegationSetId'", ":", "DelegationSetId", "}", ")", "if", "DelegationSetId", "else", "None", "if", "PrivateZone", ":", "if", "not", "_exactly_one", "(", "(", "VPCName", ",", "VPCId", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Either VPCName or VPCId is required when creating a '", "'private zone.'", ")", "vpcs", "=", "__salt__", "[", "'boto_vpc.describe_vpcs'", "]", "(", "vpc_id", "=", "VPCId", ",", "name", "=", "VPCName", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", ".", "get", "(", "'vpcs'", ",", "[", "]", ")", "if", "VPCRegion", "and", "vpcs", ":", "vpcs", "=", "[", "v", "for", "v", "in", "vpcs", "if", "v", "[", "'region'", "]", "==", "VPCRegion", "]", "if", "not", "vpcs", ":", "log", ".", "error", "(", "'Private zone requested but no VPC matching given criteria found.'", ")", "return", "None", "if", "len", "(", "vpcs", ")", ">", "1", ":", "log", ".", "error", "(", "'Private zone requested but multiple VPCs matching given '", "'criteria found: %s.'", ",", "[", "v", "[", "'id'", "]", "for", "v", "in", "vpcs", "]", ")", "return", "None", "vpc", "=", "vpcs", "[", "0", "]", "if", "VPCName", ":", "VPCId", "=", "vpc", "[", "'id'", "]", "if", "not", "VPCRegion", ":", "VPCRegion", "=", "vpc", "[", "'region'", "]", "args", ".", "update", "(", "{", "'VPC'", ":", "{", "'VPCId'", ":", "VPCId", ",", "'VPCRegion'", ":", "VPCRegion", "}", "}", ")", "else", ":", "if", "any", "(", "(", "VPCId", ",", "VPCName", ",", "VPCRegion", ")", ")", ":", "log", ".", "info", "(", "'Options VPCId, VPCName, and VPCRegion are ignored when creating '", "'non-private zones.'", ")", "tries", "=", "10", "while", "tries", ":", "try", ":", "r", "=", "conn", ".", "create_hosted_zone", "(", "*", "*", "args", ")", "r", ".", "pop", "(", "'ResponseMetadata'", ",", "None", ")", "if", "_wait_for_sync", "(", "r", "[", "'ChangeInfo'", "]", "[", "'Id'", "]", ",", "conn", ")", ":", "return", "[", "r", "]", "return", "[", "]", "except", "ClientError", "as", "e", ":", "if", "tries", "and", "e", ".", "response", ".", "get", "(", "'Error'", ",", "{", "}", ")", ".", "get", "(", 
"'Code'", ")", "==", "'Throttling'", ":", "log", ".", "debug", "(", "'Throttled by AWS API.'", ")", "time", ".", "sleep", "(", "3", ")", "tries", "-=", "1", "continue", "log", ".", "error", "(", "'Failed to create hosted zone %s: %s'", ",", "Name", ",", "e", ")", "return", "[", "]", "return", "[", "]" ]
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the newly created Hosted Zone. Name The name of the domain. This should be a fully-specified domain, and should terminate with a period. This is the name you have registered with your DNS registrar. It is also the name you will delegate from your registrar to the Amazon Route 53 delegation servers returned in response to this request. VPCId When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with VPCName. Ignored if passed for a non-private zone. VPCName When creating a private hosted zone, either the VPC ID or VPC Name to associate with is required. Exclusive with VPCId. Ignored if passed for a non-private zone. VPCRegion When creating a private hosted zone, the region of the associated VPC is required. If not provided, an effort will be made to determine it from VPCId or VPCName, if possible. If this fails, you'll need to provide an explicit value for this option. Ignored if passed for a non-private zone. CallerReference A unique string that identifies the request and that allows create_hosted_zone() calls to be retried without the risk of executing the operation twice. This is a required parameter when creating new Hosted Zones. Maximum length of 128. Comment Any comments you want to include about the hosted zone. PrivateZone Boolean - Set to True if creating a private hosted zone. DelegationSetId If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. Note that XXX TODO create_delegation_set() is not yet implemented, so you'd need to manually create any delegation sets before utilizing this. region Region endpoint to connect to. key AWS key to bind with. keyid AWS keyid to bind with. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example:: salt myminion boto3_route53.create_hosted_zone example.org.
[ "Create", "a", "new", "Route53", "Hosted", "Zone", ".", "Returns", "a", "Python", "data", "structure", "with", "information", "about", "the", "newly", "created", "Hosted", "Zone", "." ]
python
train
41.079365
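The bounded retry loop at the end of that function is a reusable pattern; a generic sketch follows (the helper name call_with_retries is made up, but botocore's ClientError and its e.response['Error']['Code'] layout are real):

import time
from botocore.exceptions import ClientError

def call_with_retries(func, *args, tries=10, delay=3, **kwargs):
    # Retry only on AWS 'Throttling' errors, up to `tries` attempts.
    while tries:
        try:
            return func(*args, **kwargs)
        except ClientError as e:
            if e.response.get('Error', {}).get('Code') == 'Throttling':
                time.sleep(delay)
                tries -= 1
                continue
            raise
    raise RuntimeError('still throttled after all retries')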
google/grumpy
third_party/stdlib/optparse.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/optparse.py#L1024-L1052
def add_option(self, *args, **kwargs):
    """add_option(Option)
       add_option(opt_str, ..., kwarg=val, ...)
    """
    if type(args[0]) in types.StringTypes:
        option = self.option_class(*args, **kwargs)
    elif len(args) == 1 and not kwargs:
        option = args[0]
        if not isinstance(option, Option):
            raise TypeError, "not an Option instance: %r" % option
    else:
        raise TypeError, "invalid arguments"

    self._check_conflict(option)

    self.option_list.append(option)
    option.container = self
    for opt in option._short_opts:
        self._short_opt[opt] = option
    for opt in option._long_opts:
        self._long_opt[opt] = option

    if option.dest is not None:     # option has a dest, we need a default
        if option.default is not NO_DEFAULT:
            self.defaults[option.dest] = option.default
        elif option.dest not in self.defaults:
            self.defaults[option.dest] = None

    return option
[ "def", "add_option", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "type", "(", "args", "[", "0", "]", ")", "in", "types", ".", "StringTypes", ":", "option", "=", "self", ".", "option_class", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "len", "(", "args", ")", "==", "1", "and", "not", "kwargs", ":", "option", "=", "args", "[", "0", "]", "if", "not", "isinstance", "(", "option", ",", "Option", ")", ":", "raise", "TypeError", ",", "\"not an Option instance: %r\"", "%", "option", "else", ":", "raise", "TypeError", ",", "\"invalid arguments\"", "self", ".", "_check_conflict", "(", "option", ")", "self", ".", "option_list", ".", "append", "(", "option", ")", "option", ".", "container", "=", "self", "for", "opt", "in", "option", ".", "_short_opts", ":", "self", ".", "_short_opt", "[", "opt", "]", "=", "option", "for", "opt", "in", "option", ".", "_long_opts", ":", "self", ".", "_long_opt", "[", "opt", "]", "=", "option", "if", "option", ".", "dest", "is", "not", "None", ":", "# option has a dest, we need a default", "if", "option", ".", "default", "is", "not", "NO_DEFAULT", ":", "self", ".", "defaults", "[", "option", ".", "dest", "]", "=", "option", ".", "default", "elif", "option", ".", "dest", "not", "in", "self", ".", "defaults", ":", "self", ".", "defaults", "[", "option", ".", "dest", "]", "=", "None", "return", "option" ]
add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
[ "add_option", "(", "Option", ")", "add_option", "(", "opt_str", "...", "kwarg", "=", "val", "...", ")" ]
python
valid
36.034483
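Both accepted call signatures in use (standard optparse; note the code above is Python 2, as the raise-statement syntax shows):

from optparse import OptionParser, Option

parser = OptionParser()
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False)
parser.add_option(Option('-o', '--output', dest='output'))
opts, args = parser.parse_args(['-v', '-o', 'out.txt'])
# opts.verbose == True, opts.output == 'out.txt'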
ArchiveTeam/wpull
wpull/application/tasks/log.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/tasks/log.py#L86-L114
def _setup_file_logger(cls, session: AppSession, args):
    '''Set up the file message logger.

    A file log handler with a formatter is added to the root logger.
    '''
    if not (args.output_file or args.append_output):
        return

    logger = logging.getLogger()
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if args.output_file:
        filename = args.output_file
        mode = 'w'
    else:
        filename = args.append_output
        mode = 'a'

    session.file_log_handler = handler = logging.FileHandler(
        filename, mode, encoding='utf-8')

    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if args.verbosity == logging.DEBUG:
        handler.setLevel(logging.DEBUG)
    else:
        handler.setLevel(logging.INFO)
[ "def", "_setup_file_logger", "(", "cls", ",", "session", ":", "AppSession", ",", "args", ")", ":", "if", "not", "(", "args", ".", "output_file", "or", "args", ".", "append_output", ")", ":", "return", "logger", "=", "logging", ".", "getLogger", "(", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s'", ")", "if", "args", ".", "output_file", ":", "filename", "=", "args", ".", "output_file", "mode", "=", "'w'", "else", ":", "filename", "=", "args", ".", "append_output", "mode", "=", "'a'", "session", ".", "file_log_handler", "=", "handler", "=", "logging", ".", "FileHandler", "(", "filename", ",", "mode", ",", "encoding", "=", "'utf-8'", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "handler", ")", "if", "args", ".", "verbosity", "==", "logging", ".", "DEBUG", ":", "handler", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "handler", ".", "setLevel", "(", "logging", ".", "INFO", ")" ]
Set up the file message logger. A file log handler with a formatter is added to the root logger.
[ "Set", "up", "the", "file", "message", "logger", "." ]
python
train
30.62069
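The same handler wiring without wpull's AppSession plumbing, as a standalone sketch (the filename run.log is arbitrary):

import logging

handler = logging.FileHandler('run.log', mode='a', encoding='utf-8')
handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
handler.setLevel(logging.INFO)
logging.getLogger().addHandler(handler)  # attach to the root logger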
b3j0f/task
b3j0f/task/condition.py
https://github.com/b3j0f/task/blob/3e3e48633b1c9a52911c19df3a44fba4b744f60e/b3j0f/task/condition.py#L43-L84
def during(rrule, duration=None, timestamp=None, **kwargs):
    """
    Check if input timestamp is in rrule+duration period.

    :param rrule: rrule to check
    :type rrule: str or dict (freq, dtstart, interval, count, wkst, until,
        bymonth, byminute, etc.)
    :param dict duration: time duration from rrule step. Ex: {'minutes': 60}
    :param float timestamp: timestamp to check between rrule+duration. If None, use now
    """
    result = False

    # if rrule is a string expression
    if isinstance(rrule, string_types):
        rrule_object = rrule_class.rrulestr(rrule)
    else:
        rrule_object = rrule_class(**rrule)

    # if timestamp is None, use now
    if timestamp is None:
        timestamp = time()

    # get now object
    now = datetime.fromtimestamp(timestamp)

    # get delta object
    duration_delta = now if duration is None else relativedelta(**duration)

    # get last date
    last_date = rrule_object.before(now, inc=True)

    # if a previous date exists
    if last_date is not None:
        next_date = last_date + duration_delta

        # check if now is between last_date and next_date
        result = last_date <= now <= next_date

    return result
[ "def", "during", "(", "rrule", ",", "duration", "=", "None", ",", "timestamp", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "False", "# if rrule is a string expression", "if", "isinstance", "(", "rrule", ",", "string_types", ")", ":", "rrule_object", "=", "rrule_class", ".", "rrulestr", "(", "rrule", ")", "else", ":", "rrule_object", "=", "rrule_class", "(", "*", "*", "rrule", ")", "# if timestamp is None, use now", "if", "timestamp", "is", "None", ":", "timestamp", "=", "time", "(", ")", "# get now object", "now", "=", "datetime", ".", "fromtimestamp", "(", "timestamp", ")", "# get delta object", "duration_delta", "=", "now", "if", "duration", "is", "None", "else", "relativedelta", "(", "*", "*", "duration", ")", "# get last date", "last_date", "=", "rrule_object", ".", "before", "(", "now", ",", "inc", "=", "True", ")", "# if a previous date exists", "if", "last_date", "is", "not", "None", ":", "next_date", "=", "last_date", "+", "duration_delta", "# check if now is between last_date and next_date", "result", "=", "last_date", "<=", "now", "<=", "next_date", "return", "result" ]
Check if input timestamp is in rrule+duration period :param rrule: rrule to check :type rrule: str or dict (freq, dtstart, interval, count, wkst, until, bymonth, byminute, etc.) :param dict duration: time duration from rrule step. Ex:{'minutes': 60} :param float timestamp: timestamp to check between rrule+duration. If None, use now
[ "Check", "if", "input", "timestamp", "is", "in", "rrule", "+", "duration", "period" ]
python
train
27.928571
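A hedged usage sketch, assuming the module path matches the URL above: check whether "now" falls inside a one-hour window recurring daily at 09:00:

from b3j0f.task.condition import during

in_window = during('FREQ=DAILY;BYHOUR=9;BYMINUTE=0;BYSECOND=0',
                   duration={'minutes': 60})  # True between 09:00 and 10:00 local time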
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1925-L1936
def disable_contactgroup_svc_notifications(self, contactgroup):
    """Disable service notifications for a contactgroup

    Format of the line that triggers function call::

        DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>

    :param contactgroup: contactgroup to disable
    :type contactgroup: alignak.objects.contactgroup.Contactgroup
    :return: None
    """
    for contact_id in contactgroup.get_contacts():
        self.disable_contact_svc_notifications(self.daemon.contacts[contact_id])
[ "def", "disable_contactgroup_svc_notifications", "(", "self", ",", "contactgroup", ")", ":", "for", "contact_id", "in", "contactgroup", ".", "get_contacts", "(", ")", ":", "self", ".", "disable_contact_svc_notifications", "(", "self", ".", "daemon", ".", "contacts", "[", "contact_id", "]", ")" ]
Disable service notifications for a contactgroup Format of the line that triggers function call:: DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to disable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None
[ "Disable", "service", "notifications", "for", "a", "contactgroup", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
44.583333
apache/incubator-mxnet
python/mxnet/contrib/onnx/onnx2mx/_translation_utils.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_translation_utils.py#L148-L157
def _fix_bias(op_name, attrs, num_inputs):
    """A workaround for the 'use_bias' attribute: since ONNX doesn't provide
    this attribute, we have to check the number of inputs to decide it."""
    if num_inputs == 3:
        attrs['no_bias'] = False
    elif num_inputs == 2:
        attrs['no_bias'] = True
    else:
        raise ValueError("Unexpected number of inputs for: {}".format(op_name))
    return attrs
[ "def", "_fix_bias", "(", "op_name", ",", "attrs", ",", "num_inputs", ")", ":", "if", "num_inputs", "==", "3", ":", "attrs", "[", "'no_bias'", "]", "=", "False", "elif", "num_inputs", "==", "2", ":", "attrs", "[", "'no_bias'", "]", "=", "True", "else", ":", "raise", "ValueError", "(", "\"Unexpected number of inputs for: {}\"", ".", "format", "(", "op_name", ")", ")", "return", "attrs" ]
A workaround for the 'use_bias' attribute: since ONNX doesn't provide this attribute, we have to check the number of inputs to decide it.
[ "A", "workaround", "for", "use_bias", "attribute", "since", "onnx", "don", "t", "provide", "this", "attribute", "we", "have", "to", "check", "the", "number", "of", "inputs", "to", "decide", "it", "." ]
python
train
40
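A quick sketch of the attribute rewrite (the helper is private, so the import path is an assumption based on the file shown above):

from mxnet.contrib.onnx.onnx2mx._translation_utils import _fix_bias

attrs = _fix_bias('Conv', {'kernel': (3, 3)}, num_inputs=3)
# attrs == {'kernel': (3, 3), 'no_bias': False}  # 3 inputs: data, weight, bias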
pyecore/pyecoregen
pyecoregen/ecore.py
https://github.com/pyecore/pyecoregen/blob/8c7a792f46d7d94e5d13e00e2967dd237351a4cf/pyecoregen/ecore.py#L59-L71
def imported_classifiers_package(p: ecore.EPackage):
    """Determines which classifiers have to be imported into given package."""
    classes = {c for c in p.eClassifiers if isinstance(c, ecore.EClass)}

    references = itertools.chain(*(c.eAllReferences() for c in classes))
    references_types = (r.eType for r in references)
    imported = {c for c in references_types if getattr(c, 'ePackage', p) is not p}

    imported_dict = {}
    for classifier in imported:
        imported_dict.setdefault(classifier.ePackage, set()).add(classifier)

    return imported_dict
[ "def", "imported_classifiers_package", "(", "p", ":", "ecore", ".", "EPackage", ")", ":", "classes", "=", "{", "c", "for", "c", "in", "p", ".", "eClassifiers", "if", "isinstance", "(", "c", ",", "ecore", ".", "EClass", ")", "}", "references", "=", "itertools", ".", "chain", "(", "*", "(", "c", ".", "eAllReferences", "(", ")", "for", "c", "in", "classes", ")", ")", "references_types", "=", "(", "r", ".", "eType", "for", "r", "in", "references", ")", "imported", "=", "{", "c", "for", "c", "in", "references_types", "if", "getattr", "(", "c", ",", "'ePackage'", ",", "p", ")", "is", "not", "p", "}", "imported_dict", "=", "{", "}", "for", "classifier", "in", "imported", ":", "imported_dict", ".", "setdefault", "(", "classifier", ".", "ePackage", ",", "set", "(", ")", ")", ".", "add", "(", "classifier", ")", "return", "imported_dict" ]
Determines which classifiers have to be imported into given package.
[ "Determines", "which", "classifiers", "have", "to", "be", "imported", "into", "given", "package", "." ]
python
train
45.923077
KelSolaar/Foundations
foundations/ui/common.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/ui/common.py#L42-L58
def center_widget_on_screen(widget, screen=None):
    """
    Centers given Widget on the screen.

    :param widget: Current Widget.
    :type widget: QWidget
    :param screen: Screen used for centering.
    :type screen: int
    :return: Definition success.
    :rtype: bool
    """
    screen = screen and screen or QApplication.desktop().primaryScreen()
    desktop_width = QApplication.desktop().screenGeometry(screen).width()
    desktop_height = QApplication.desktop().screenGeometry(screen).height()
    widget.move(desktop_width / 2 - widget.sizeHint().width() / 2,
                desktop_height / 2 - widget.sizeHint().height() / 2)
    return True
[ "def", "center_widget_on_screen", "(", "widget", ",", "screen", "=", "None", ")", ":", "screen", "=", "screen", "and", "screen", "or", "QApplication", ".", "desktop", "(", ")", ".", "primaryScreen", "(", ")", "desktop_width", "=", "QApplication", ".", "desktop", "(", ")", ".", "screenGeometry", "(", "screen", ")", ".", "width", "(", ")", "desktop_height", "=", "QApplication", ".", "desktop", "(", ")", ".", "screenGeometry", "(", "screen", ")", ".", "height", "(", ")", "widget", ".", "move", "(", "desktop_width", "/", "2", "-", "widget", ".", "sizeHint", "(", ")", ".", "width", "(", ")", "/", "2", ",", "desktop_height", "/", "2", "-", "widget", ".", "sizeHint", "(", ")", ".", "height", "(", ")", "/", "2", ")", "return", "True" ]
Centers given Widget on the screen. :param widget: Current Widget. :type widget: QWidget :param screen: Screen used for centering. :type screen: int :return: Definition success. :rtype: bool
[ "Centers", "given", "Widget", "on", "the", "screen", "." ]
python
train
37
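A hedged usage sketch (a Qt4-era widgets stack, which the QApplication.desktop() calls above assume):

from PyQt4.QtGui import QApplication, QPushButton
from foundations.ui.common import center_widget_on_screen

app = QApplication([])
button = QPushButton('Hello')
center_widget_on_screen(button)  # centers on the primary screen
button.show()
app.exec_()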
snare/scruffy
scruffy/file.py
https://github.com/snare/scruffy/blob/0fedc08cfdb6db927ff93c09f25f24ce5a04c541/scruffy/file.py#L162-L189
def configure(self):
    """
    Configure the Python logging module for this file.
    """
    # build a file handler for this file
    handler = logging.FileHandler(self.path, delay=True)

    # if we got a format string, create a formatter with it
    if self._format:
        handler.setFormatter(logging.Formatter(self._format))

    # if we got a string for the formatter, assume it's the name of a
    # formatter in the environment's config
    if type(self._formatter) == str:
        if self._env and self._env.config.logging.dict_config.formatters[self._formatter]:
            d = self._env.config.logging.dict_config.formatters[self._formatter].to_dict()
            handler.setFormatter(logging.Formatter(**d))
    elif type(self._formatter) == dict:
        # if it's a dict it must be the actual formatter params
        handler.setFormatter(logging.Formatter(**self._formatter))

    # add the file handler to whatever loggers were specified
    if len(self._loggers):
        for name in self._loggers:
            logging.getLogger(name).addHandler(handler)
    else:
        # none specified, just add it to the root logger
        logging.getLogger().addHandler(handler)
[ "def", "configure", "(", "self", ")", ":", "# build a file handler for this file", "handler", "=", "logging", ".", "FileHandler", "(", "self", ".", "path", ",", "delay", "=", "True", ")", "# if we got a format string, create a formatter with it", "if", "self", ".", "_format", ":", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "self", ".", "_format", ")", ")", "# if we got a string for the formatter, assume it's the name of a", "# formatter in the environment's config", "if", "type", "(", "self", ".", "_formatter", ")", "==", "str", ":", "if", "self", ".", "_env", "and", "self", ".", "_env", ".", "config", ".", "logging", ".", "dict_config", ".", "formatters", "[", "self", ".", "_formatter", "]", ":", "d", "=", "self", ".", "_env", ".", "config", ".", "logging", ".", "dict_config", ".", "formatters", "[", "self", ".", "_formatter", "]", ".", "to_dict", "(", ")", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "*", "*", "d", ")", ")", "elif", "type", "(", "self", ".", "_formatter", ")", "==", "dict", ":", "# if it's a dict it must be the actual formatter params", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "*", "*", "self", ".", "_formatter", ")", ")", "# add the file handler to whatever loggers were specified", "if", "len", "(", "self", ".", "_loggers", ")", ":", "for", "name", "in", "self", ".", "_loggers", ":", "logging", ".", "getLogger", "(", "name", ")", ".", "addHandler", "(", "handler", ")", "else", ":", "# none specified, just add it to the root logger", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "handler", ")" ]
Configure the Python logging module for this file.
[ "Configure", "the", "Python", "logging", "module", "for", "this", "file", "." ]
python
test
45
Jaymon/endpoints
endpoints/http.py
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L562-L586
def base(self, *paths, **query_kwargs):
    """create a new url object using the current base path as a base

    if you had requested /foo/bar, then this would append *paths and
    **query_kwargs to /foo/bar

    :example:
        # current path: /foo/bar
        print url # http://host.com/foo/bar
        print url.base() # http://host.com/foo/bar
        print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam

    :param *paths: list, the paths to append to the current path without query params
    :param **query_kwargs: dict, any query string params to add
    """
    kwargs = self._normalize_params(*paths, **query_kwargs)
    if self.path:
        if "path" in kwargs:
            paths = self.normalize_paths(self.path, kwargs["path"])
            kwargs["path"] = "/".join(paths)
        else:
            kwargs["path"] = self.path

    return self.create(self.root, **kwargs)
[ "def", "base", "(", "self", ",", "*", "paths", ",", "*", "*", "query_kwargs", ")", ":", "kwargs", "=", "self", ".", "_normalize_params", "(", "*", "paths", ",", "*", "*", "query_kwargs", ")", "if", "self", ".", "path", ":", "if", "\"path\"", "in", "kwargs", ":", "paths", "=", "self", ".", "normalize_paths", "(", "self", ".", "path", ",", "kwargs", "[", "\"path\"", "]", ")", "kwargs", "[", "\"path\"", "]", "=", "\"/\"", ".", "join", "(", "paths", ")", "else", ":", "kwargs", "[", "\"path\"", "]", "=", "self", ".", "path", "return", "self", ".", "create", "(", "self", ".", "root", ",", "*", "*", "kwargs", ")" ]
create a new url object using the current base path as a base if you had requested /foo/bar, then this would append *paths and **query_kwargs to /foo/bar :example: # current path: /foo/bar print url # http://host.com/foo/bar print url.base() # http://host.com/foo/bar print url.base("che", boom="bam") # http://host/foo/bar/che?boom=bam :param *paths: list, the paths to append to the current path without query params :param **query_kwargs: dict, any query string params to add
[ "create", "a", "new", "url", "object", "using", "the", "current", "base", "path", "as", "a", "base" ]
python
train
38.36
openshift/openshift-restclient-python
openshift/dynamic/client.py
https://github.com/openshift/openshift-restclient-python/blob/5d86bf5ba4e723bcc4d33ad47077aca01edca0f6/openshift/dynamic/client.py#L698-L733
def get_resources_for_api_version(self, prefix, group, version, preferred):
    """ returns a dictionary of resources associated with provided (prefix, group, version)"""

    resources = defaultdict(list)
    subresources = {}

    path = '/'.join(filter(None, [prefix, group, version]))
    resources_response = load_json(self.client.request('GET', path))['resources']

    resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
    subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
    for subresource in subresources_raw:
        resource, name = subresource['name'].split('/')
        if not subresources.get(resource):
            subresources[resource] = {}
        subresources[resource][name] = subresource

    for resource in resources_raw:
        # Prevent duplicate keys
        for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
            resource.pop(key, None)

        resourceobj = Resource(
            prefix=prefix,
            group=group,
            api_version=version,
            client=self.client,
            preferred=preferred,
            subresources=subresources.get(resource['name']),
            **resource
        )
        resources[resource['kind']].append(resourceobj)

        resource_list = ResourceList(self.client, group=group,
                                     api_version=version, base_kind=resource['kind'])
        resources[resource_list.kind].append(resource_list)

    return resources
[ "def", "get_resources_for_api_version", "(", "self", ",", "prefix", ",", "group", ",", "version", ",", "preferred", ")", ":", "resources", "=", "defaultdict", "(", "list", ")", "subresources", "=", "{", "}", "path", "=", "'/'", ".", "join", "(", "filter", "(", "None", ",", "[", "prefix", ",", "group", ",", "version", "]", ")", ")", "resources_response", "=", "load_json", "(", "self", ".", "client", ".", "request", "(", "'GET'", ",", "path", ")", ")", "[", "'resources'", "]", "resources_raw", "=", "list", "(", "filter", "(", "lambda", "resource", ":", "'/'", "not", "in", "resource", "[", "'name'", "]", ",", "resources_response", ")", ")", "subresources_raw", "=", "list", "(", "filter", "(", "lambda", "resource", ":", "'/'", "in", "resource", "[", "'name'", "]", ",", "resources_response", ")", ")", "for", "subresource", "in", "subresources_raw", ":", "resource", ",", "name", "=", "subresource", "[", "'name'", "]", ".", "split", "(", "'/'", ")", "if", "not", "subresources", ".", "get", "(", "resource", ")", ":", "subresources", "[", "resource", "]", "=", "{", "}", "subresources", "[", "resource", "]", "[", "name", "]", "=", "subresource", "for", "resource", "in", "resources_raw", ":", "# Prevent duplicate keys", "for", "key", "in", "(", "'prefix'", ",", "'group'", ",", "'api_version'", ",", "'client'", ",", "'preferred'", ")", ":", "resource", ".", "pop", "(", "key", ",", "None", ")", "resourceobj", "=", "Resource", "(", "prefix", "=", "prefix", ",", "group", "=", "group", ",", "api_version", "=", "version", ",", "client", "=", "self", ".", "client", ",", "preferred", "=", "preferred", ",", "subresources", "=", "subresources", ".", "get", "(", "resource", "[", "'name'", "]", ")", ",", "*", "*", "resource", ")", "resources", "[", "resource", "[", "'kind'", "]", "]", ".", "append", "(", "resourceobj", ")", "resource_list", "=", "ResourceList", "(", "self", ".", "client", ",", "group", "=", "group", ",", "api_version", "=", "version", ",", "base_kind", "=", "resource", "[", "'kind'", "]", ")", "resources", "[", "resource_list", ".", "kind", "]", ".", "append", "(", "resource_list", ")", "return", "resources" ]
returns a dictionary of resources associated with provided (prefix, group, version)
[ "returns", "a", "dictionary", "of", "resources", "associated", "with", "provided", "(", "prefix", "group", "version", ")" ]
python
train
44.166667
secdev/scapy
scapy/layers/lltd.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/lltd.py#L809-L836
def parse(self, plist):
    """Update the builder using the provided `plist`. `plist` can
    be either a Packet() or a PacketList().
    """
    if not isinstance(plist, PacketList):
        plist = PacketList(plist)
    for pkt in plist[LLTD]:
        if LLTDQueryLargeTlv in pkt:
            key = "%s:%s:%d" % (pkt.real_dst, pkt.real_src, pkt.seq)
            self.types_offsets[key] = (pkt[LLTDQueryLargeTlv].type,
                                       pkt[LLTDQueryLargeTlv].offset)
        elif LLTDQueryLargeTlvResp in pkt:
            try:
                key = "%s:%s:%d" % (pkt.real_src, pkt.real_dst, pkt.seq)
                content, offset = self.types_offsets[key]
            except KeyError:
                continue
            loc = slice(offset, offset + pkt[LLTDQueryLargeTlvResp].len)
            key = "%s > %s [%s]" % (
                pkt.real_src, pkt.real_dst,
                LLTDQueryLargeTlv.fields_desc[0].i2s.get(content, content),
            )
            data = self.data.setdefault(key, array("B"))
            datalen = len(data)
            if datalen < loc.stop:
                data.extend(array("B", b"\x00" * (loc.stop - datalen)))
            data[loc] = array("B", pkt[LLTDQueryLargeTlvResp].value)
[ "def", "parse", "(", "self", ",", "plist", ")", ":", "if", "not", "isinstance", "(", "plist", ",", "PacketList", ")", ":", "plist", "=", "PacketList", "(", "plist", ")", "for", "pkt", "in", "plist", "[", "LLTD", "]", ":", "if", "LLTDQueryLargeTlv", "in", "pkt", ":", "key", "=", "\"%s:%s:%d\"", "%", "(", "pkt", ".", "real_dst", ",", "pkt", ".", "real_src", ",", "pkt", ".", "seq", ")", "self", ".", "types_offsets", "[", "key", "]", "=", "(", "pkt", "[", "LLTDQueryLargeTlv", "]", ".", "type", ",", "pkt", "[", "LLTDQueryLargeTlv", "]", ".", "offset", ")", "elif", "LLTDQueryLargeTlvResp", "in", "pkt", ":", "try", ":", "key", "=", "\"%s:%s:%d\"", "%", "(", "pkt", ".", "real_src", ",", "pkt", ".", "real_dst", ",", "pkt", ".", "seq", ")", "content", ",", "offset", "=", "self", ".", "types_offsets", "[", "key", "]", "except", "KeyError", ":", "continue", "loc", "=", "slice", "(", "offset", ",", "offset", "+", "pkt", "[", "LLTDQueryLargeTlvResp", "]", ".", "len", ")", "key", "=", "\"%s > %s [%s]\"", "%", "(", "pkt", ".", "real_src", ",", "pkt", ".", "real_dst", ",", "LLTDQueryLargeTlv", ".", "fields_desc", "[", "0", "]", ".", "i2s", ".", "get", "(", "content", ",", "content", ")", ",", ")", "data", "=", "self", ".", "data", ".", "setdefault", "(", "key", ",", "array", "(", "\"B\"", ")", ")", "datalen", "=", "len", "(", "data", ")", "if", "datalen", "<", "loc", ".", "stop", ":", "data", ".", "extend", "(", "array", "(", "\"B\"", ",", "b\"\\x00\"", "*", "(", "loc", ".", "stop", "-", "datalen", ")", ")", ")", "data", "[", "loc", "]", "=", "array", "(", "\"B\"", ",", "pkt", "[", "LLTDQueryLargeTlvResp", "]", ".", "value", ")" ]
Update the builder using the provided `plist`. `plist` can be either a Packet() or a PacketList().
[ "Update", "the", "builder", "using", "the", "provided", "plist", ".", "plist", "can", "be", "either", "a", "Packet", "()", "or", "a", "PacketList", "()", "." ]
python
train
47.178571
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L668-L679
def _parse_logging(log_values: dict, service_config: dict):
    """Parse log key.

    Args:
        log_values (dict): logging configuration values
        service_config (dict): Service specification
    """
    for log_key, log_value in log_values.items():
        if 'driver' in log_key:
            service_config['log_driver'] = log_value
        if 'options' in log_key:
            service_config['log_driver_options'] = log_value
[ "def", "_parse_logging", "(", "log_values", ":", "dict", ",", "service_config", ":", "dict", ")", ":", "for", "log_key", ",", "log_value", "in", "log_values", ".", "items", "(", ")", ":", "if", "'driver'", "in", "log_key", ":", "service_config", "[", "'log_driver'", "]", "=", "log_value", "if", "'options'", "in", "log_key", ":", "service_config", "[", "'log_driver_options'", "]", "=", "log_value" ]
Parse log key. Args: log_values (dict): logging configuration values service_config (dict): Service specification
[ "Parse", "log", "key", "." ]
python
train
38.916667
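The mapping is easy to see with a compose-style logging block (values are illustrative):

service_config = {}
_parse_logging({'driver': 'json-file', 'options': {'max-size': '10m'}}, service_config)
# service_config == {'log_driver': 'json-file',
#                    'log_driver_options': {'max-size': '10m'}}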
OCA/openupgradelib
openupgradelib/openupgrade_90.py
https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade_90.py#L16-L81
def convert_binary_field_to_attachment(env, field_spec):
    """This method converts the 8.0 binary fields to attachments like Odoo 9.0
    makes with the new attachment=True attribute. It has to be called on
    post-migration script, as there's a call to get the res_name of the
    target model, which is not yet loaded on pre-migration.

    You need to rename the involved column in pre-migration script if you
    don't want to lose your data in the process.

    After the conversion, this method also removes the source column to avoid
    data duplication.

    This is done through the Odoo ORM, because there's a lot of logic
    associated with guessing MIME type, format and length, file saving in
    store... that isn't worth recreating via SQL, as there isn't much of a
    performance problem.

    :param env: Odoo environment
    :param field_spec: A dictionary with the ORM model name as key, and as
        dictionary values a tuple with:

        * field name to be converted as attachment as first element.
        * SQL column name that contains actual data as second element. If the
          second element is None, then the column name is taken calling
          `get_legacy_name` method, which is the typical technique.
    """
    logger = logging.getLogger('OpenUpgrade')
    attachment_model = env['ir.attachment']
    for model_name in field_spec:
        model = env[model_name]
        for field, column in field_spec[model_name]:
            if column is None:
                column = openupgrade.get_legacy_name(field)
            logger.info(
                "Converting to attachment field {} from model {} stored in "
                "column {}".format(field, model_name, column)
            )
            last_id = 0
            while True:
                env.cr.execute(
                    """SELECT id, {0} FROM {1} WHERE {0} IS NOT NULL AND id > {2}
                    ORDER BY id LIMIT 500;
                    """.format(column, model._table, last_id)
                )
                rows = env.cr.fetchall()
                if not rows:
                    break
                logger.info(
                    "  converting {0} items starting after {1}..."
                    "".format(len(rows), last_id))
                for row in rows:
                    last_id = row[0]
                    data = bytes(row[1])
                    if data and data != 'None':
                        attachment_model.create({
                            'name': field,
                            'res_model': model_name,
                            'res_field': field,
                            'res_id': last_id,
                            'type': 'binary',
                            'datas': data,
                        })
            # Remove source column for cleaning the room
            env.cr.execute("ALTER TABLE {} DROP COLUMN {}".format(
                model._table, column,
            ))
[ "def", "convert_binary_field_to_attachment", "(", "env", ",", "field_spec", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'OpenUpgrade'", ")", "attachment_model", "=", "env", "[", "'ir.attachment'", "]", "for", "model_name", "in", "field_spec", ":", "model", "=", "env", "[", "model_name", "]", "for", "field", ",", "column", "in", "field_spec", "[", "model_name", "]", ":", "if", "column", "is", "None", ":", "column", "=", "openupgrade", ".", "get_legacy_name", "(", "field", ")", "logger", ".", "info", "(", "\"Converting to attachment field {} from model {} stored in \"", "\"column {}\"", ".", "format", "(", "field", ",", "model_name", ",", "column", ")", ")", "last_id", "=", "0", "while", "True", ":", "env", ".", "cr", ".", "execute", "(", "\"\"\"SELECT id, {0} FROM {1} WHERE {0} IS NOT NULL AND id > {2}\n ORDER BY id LIMIT 500;\n \"\"\"", ".", "format", "(", "column", ",", "model", ".", "_table", ",", "last_id", ")", ")", "rows", "=", "env", ".", "cr", ".", "fetchall", "(", ")", "if", "not", "rows", ":", "break", "logger", ".", "info", "(", "\" converting {0} items starting after {1}...\"", "\"\"", ".", "format", "(", "len", "(", "rows", ")", ",", "last_id", ")", ")", "for", "row", "in", "rows", ":", "last_id", "=", "row", "[", "0", "]", "data", "=", "bytes", "(", "row", "[", "1", "]", ")", "if", "data", "and", "data", "!=", "'None'", ":", "attachment_model", ".", "create", "(", "{", "'name'", ":", "field", ",", "'res_model'", ":", "model_name", ",", "'res_field'", ":", "field", ",", "'res_id'", ":", "last_id", ",", "'type'", ":", "'binary'", ",", "'datas'", ":", "data", ",", "}", ")", "# Remove source column for cleaning the room", "env", ".", "cr", ".", "execute", "(", "\"ALTER TABLE {} DROP COLUMN {}\"", ".", "format", "(", "model", ".", "_table", ",", "column", ",", ")", ")" ]
This method converts the 8.0 binary fields to attachments like Odoo 9.0 makes with the new attachment=True attribute. It has to be called on post-migration script, as there's a call to get the res_name of the target model, which is not yet loaded on pre-migration. You need to rename the involved column in pre-migration script if you don't want to lose your data in the process. After the conversion, this method also removes the source column to avoid data duplication. This is done through the Odoo ORM, because there's a lot of logic associated with guessing MIME type, format and length, file saving in store... that isn't worth recreating via SQL, as there isn't much of a performance problem. :param env: Odoo environment :param field_spec: A dictionary with the ORM model name as key, and as dictionary values a tuple with: * field name to be converted as attachment as first element. * SQL column name that contains actual data as second element. If the second element is None, then the column name is taken calling `get_legacy_name` method, which is the typical technique.
[ "This", "method", "converts", "the", "8", ".", "0", "binary", "fields", "to", "attachments", "like", "Odoo", "9", ".", "0", "makes", "with", "the", "new", "attachment", "=", "True", "attribute", ".", "It", "has", "to", "be", "called", "on", "post", "-", "migration", "script", "as", "there", "s", "a", "call", "to", "get", "the", "res_name", "of", "the", "target", "model", "which", "is", "not", "yet", "loaded", "on", "pre", "-", "migration", "." ]
python
train
43.863636
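A hypothetical post-migration call (model and field names invented for illustration; the second tuple element may be None to fall back to get_legacy_name):

convert_binary_field_to_attachment(env, {
    'res.partner': [
        ('image', None),  # column derived via openupgrade.get_legacy_name('image')
    ],
})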
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2015_04_05/file/fileservice.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/file/fileservice.py#L1084-L1123
def _list_directories_and_files(self, share_name, directory_name=None,
                                marker=None, max_results=None, timeout=None):
    '''
    Returns a list of the directories and files under the specified share.

    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str marker:
        A string value that identifies the portion of the list
        to be returned with the next list operation. The operation returns
        a next_marker value within the response body if the list returned was
        not complete. The marker value may then be used in a subsequent
        call to request the next set of list items. The marker value is
        opaque to the client.
    :param int max_results:
        Specifies the maximum number of files to return,
        including all directory elements. If the request does not specify
        max_results or specifies a value greater than 5,000, the server will
        return up to 5,000 items. Setting max_results to a value less than
        or equal to zero results in error response code 400 (Bad Request).
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('share_name', share_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = _get_path(share_name, directory_name)
    request.query = [
        ('restype', 'directory'),
        ('comp', 'list'),
        ('marker', _to_str(marker)),
        ('maxresults', _int_to_str(max_results)),
        ('timeout', _int_to_str(timeout)),
    ]

    response = self._perform_request(request)
    return _convert_xml_to_directories_and_files(response)
[ "def", "_list_directories_and_files", "(", "self", ",", "share_name", ",", "directory_name", "=", "None", ",", "marker", "=", "None", ",", "max_results", "=", "None", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ",", "directory_name", ")", "request", ".", "query", "=", "[", "(", "'restype'", ",", "'directory'", ")", ",", "(", "'comp'", ",", "'list'", ")", ",", "(", "'marker'", ",", "_to_str", "(", "marker", ")", ")", ",", "(", "'maxresults'", ",", "_int_to_str", "(", "max_results", ")", ")", ",", "(", "'timeout'", ",", "_int_to_str", "(", "timeout", ")", ")", ",", "]", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_convert_xml_to_directories_and_files", "(", "response", ")" ]
Returns a list of the directories and files under the specified share. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str marker: A string value that identifies the portion of the list to be returned with the next list operation. The operation returns a next_marker value within the response body if the list returned was not complete. The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque to the client. :param int max_results: Specifies the maximum number of files to return, including all directory elements. If the request does not specify max_results or specifies a value greater than 5,000, the server will return up to 5,000 items. Setting max_results to a value less than or equal to zero results in error response code 400 (Bad Request). :param int timeout: The timeout parameter is expressed in seconds.
[ "Returns", "a", "list", "of", "the", "directories", "and", "files", "under", "the", "specified", "share", "." ]
python
train
46.825
ricequant/rqalpha
rqalpha/model/instrument.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L188-L198
def status(self):
    """
    [str] Instrument status. 'Active' - normally listed, 'Delisted' - delisted,
    'TemporarySuspended' - listing temporarily suspended, 'PreIPO' - in the
    offering/placement period, 'FailIPO' - offering failed (stocks only)
    """
    try:
        return self.__dict__["status"]
    except (KeyError, ValueError):
        raise AttributeError(
            "Instrument(order_book_id={}) has no attribute 'status' ".format(self.order_book_id)
        )
[ "def", "status", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"status\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'status' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] Instrument status. 'Active' - normally listed, 'Delisted' - delisted, 'TemporarySuspended' - listing temporarily suspended, 'PreIPO' - in the offering/placement period, 'FailIPO' - offering failed (stocks only)
[ "[", "str", "]", "合约状态。’Active’", "-", "正常上市", "‘Delisted’", "-", "终止上市", "‘TemporarySuspended’", "-", "暂停上市", "‘PreIPO’", "-", "发行配售期间", "‘FailIPO’", "-", "发行失败(股票专用)" ]
python
train
37.181818
markovmodel/PyEMMA
pyemma/coordinates/data/_base/datasource.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/_base/datasource.py#L519-L608
def write_to_csv(self, filename=None, extension='.dat', overwrite=False,
                 stride=1, chunksize=None, **kw):
    """ write all data to csv with numpy.savetxt

    Parameters
    ----------
    filename : str, optional
        filename string, which may contain placeholders {itraj} and {stride}:

        * itraj will be replaced by trajectory index
        * stride is the stride argument of this method

        If filename is not given, an attempt is made to obtain the filenames
        from the data source of this iterator.
    extension : str, optional, default='.dat'
        filename extension of created files
    overwrite : bool, optional, default=False
        shall existing files be overwritten? If a file exists, this method will raise.
    stride : int
        omit every n'th frame
    chunksize: int, default=None
        how many frames to process at once
    kw : dict, optional
        named arguments passed into numpy.savetxt (header, separator etc.)

    Example
    -------
    Assume you want to save features calculated by some FeatureReader to ASCII:

    >>> import numpy as np, pyemma
    >>> import os
    >>> from pyemma.util.files import TemporaryDirectory
    >>> from pyemma.util.contexts import settings
    >>> data = [np.random.random((10,3))] * 3
    >>> reader = pyemma.coordinates.source(data)
    >>> filename = "distances_{itraj}.dat"
    >>> with TemporaryDirectory() as td, settings(show_progress_bars=False):
    ...    out = os.path.join(td, filename)
    ...    reader.write_to_csv(out, header='', delimiter=';')
    ...    print(sorted(os.listdir(td)))
    ['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
    """
    import os
    if not filename:
        assert hasattr(self, 'filenames')
        # raise RuntimeError("could not determine filenames")
        filenames = []
        for f in self.filenames:
            base, _ = os.path.splitext(f)
            filenames.append(base + extension)
    elif isinstance(filename, str):
        filename = filename.replace('{stride}', str(stride))
        filenames = [filename.replace('{itraj}', str(itraj)) for itraj
                     in range(self.number_of_trajectories())]
    else:
        raise TypeError("filename should be str or None")
    self.logger.debug("write_to_csv, filenames=%s" % filenames)
    # check files before starting to write
    import errno
    for f in filenames:
        try:
            st = os.stat(f)
            raise OSError(errno.EEXIST)
        except OSError as e:
            if e.errno == errno.EEXIST:
                if overwrite:
                    continue
            elif e.errno == errno.ENOENT:
                continue
            raise
    f = None

    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    it = self.iterator(stride, chunk=chunksize, return_trajindex=False)
    pg.register(it.n_chunks, "saving to csv")
    with it, pg.context():
        oldtraj = -1
        for X in it:
            if oldtraj != it.current_trajindex:
                if f is not None:
                    f.close()
                fn = filenames[it.current_trajindex]
                self.logger.debug("opening file %s for writing csv." % fn)
                f = open(fn, 'wb')
                oldtraj = it.current_trajindex
            np.savetxt(f, X, **kw)
            f.flush()
            pg.update(1, 0)
    if f is not None:
        f.close()
[ "def", "write_to_csv", "(", "self", ",", "filename", "=", "None", ",", "extension", "=", "'.dat'", ",", "overwrite", "=", "False", ",", "stride", "=", "1", ",", "chunksize", "=", "None", ",", "*", "*", "kw", ")", ":", "import", "os", "if", "not", "filename", ":", "assert", "hasattr", "(", "self", ",", "'filenames'", ")", "# raise RuntimeError(\"could not determine filenames\")", "filenames", "=", "[", "]", "for", "f", "in", "self", ".", "filenames", ":", "base", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "f", ")", "filenames", ".", "append", "(", "base", "+", "extension", ")", "elif", "isinstance", "(", "filename", ",", "str", ")", ":", "filename", "=", "filename", ".", "replace", "(", "'{stride}'", ",", "str", "(", "stride", ")", ")", "filenames", "=", "[", "filename", ".", "replace", "(", "'{itraj}'", ",", "str", "(", "itraj", ")", ")", "for", "itraj", "in", "range", "(", "self", ".", "number_of_trajectories", "(", ")", ")", "]", "else", ":", "raise", "TypeError", "(", "\"filename should be str or None\"", ")", "self", ".", "logger", ".", "debug", "(", "\"write_to_csv, filenames=%s\"", "%", "filenames", ")", "# check files before starting to write", "import", "errno", "for", "f", "in", "filenames", ":", "try", ":", "st", "=", "os", ".", "stat", "(", "f", ")", "raise", "OSError", "(", "errno", ".", "EEXIST", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", ":", "if", "overwrite", ":", "continue", "elif", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "continue", "raise", "f", "=", "None", "from", "pyemma", ".", "_base", ".", "progress", "import", "ProgressReporter", "pg", "=", "ProgressReporter", "(", ")", "it", "=", "self", ".", "iterator", "(", "stride", ",", "chunk", "=", "chunksize", ",", "return_trajindex", "=", "False", ")", "pg", ".", "register", "(", "it", ".", "n_chunks", ",", "\"saving to csv\"", ")", "with", "it", ",", "pg", ".", "context", "(", ")", ":", "oldtraj", "=", "-", "1", "for", "X", "in", "it", ":", "if", "oldtraj", "!=", "it", ".", "current_trajindex", ":", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")", "fn", "=", "filenames", "[", "it", ".", "current_trajindex", "]", "self", ".", "logger", ".", "debug", "(", "\"opening file %s for writing csv.\"", "%", "fn", ")", "f", "=", "open", "(", "fn", ",", "'wb'", ")", "oldtraj", "=", "it", ".", "current_trajindex", "np", ".", "savetxt", "(", "f", ",", "X", ",", "*", "*", "kw", ")", "f", ".", "flush", "(", ")", "pg", ".", "update", "(", "1", ",", "0", ")", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")" ]
write all data to csv with numpy.savetxt

Parameters
----------
filename : str, optional
    filename string, which may contain placeholders {itraj} and {stride}:

    * itraj will be replaced by trajectory index
    * stride is stride argument of this method

    If filename is not given, the filenames are derived from the data
    source of this iterator.
extension : str, optional, default='.dat'
    filename extension of created files
overwrite : bool, optional, default=False
    shall existing files be overwritten? If a file exists and overwrite
    is False, this method raises.
stride : int
    omit every n'th frame
chunksize: int, default=None
    how many frames to process at once
kw : dict, optional
    named arguments passed into numpy.savetxt (header, separator etc.)

Example
-------
Assume you want to save features calculated by some FeatureReader to ASCII:

>>> import numpy as np, pyemma
>>> import os
>>> from pyemma.util.files import TemporaryDirectory
>>> from pyemma.util.contexts import settings
>>> data = [np.random.random((10,3))] * 3
>>> reader = pyemma.coordinates.source(data)
>>> filename = "distances_{itraj}.dat"
>>> with TemporaryDirectory() as td, settings(show_progress_bars=False):
...    out = os.path.join(td, filename)
...    reader.write_to_csv(out, header='', delimiter=';')
...    print(sorted(os.listdir(td)))
['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
[ "write", "all", "data", "to", "csv", "with", "numpy", ".", "savetxt" ]
python
train
40.933333
marshmallow-code/marshmallow
src/marshmallow/utils.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/utils.py#L167-L178
def rfcformat(dt, localtime=False): """Return the RFC822-formatted representation of a datetime object. :param datetime dt: The datetime. :param bool localtime: If ``True``, return the date relative to the local timezone instead of UTC, displaying the proper offset, e.g. "Sun, 10 Nov 2013 08:23:45 -0600" """ if not localtime: return formatdate(timegm(dt.utctimetuple())) else: return local_rfcformat(dt)
[ "def", "rfcformat", "(", "dt", ",", "localtime", "=", "False", ")", ":", "if", "not", "localtime", ":", "return", "formatdate", "(", "timegm", "(", "dt", ".", "utctimetuple", "(", ")", ")", ")", "else", ":", "return", "local_rfcformat", "(", "dt", ")" ]
Return the RFC822-formatted representation of a datetime object. :param datetime dt: The datetime. :param bool localtime: If ``True``, return the date relative to the local timezone instead of UTC, displaying the proper offset, e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
[ "Return", "the", "RFC822", "-", "formatted", "representation", "of", "a", "datetime", "object", "." ]
python
train
37.583333
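A minimal usage sketch for `rfcformat`, assuming the module-level imports it relies on (`formatdate` from `email.utils`, `timegm` from `calendar`) and its `local_rfcformat` helper are in scope:

    from datetime import datetime

    dt = datetime(2013, 11, 10, 8, 23, 45)
    print(rfcformat(dt))  # 'Sun, 10 Nov 2013 08:23:45 -0000'
    # rfcformat(dt, localtime=True) would instead delegate to local_rfcformat(dt)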
samuel-phan/mssh-copy-id
msshcopyid/utils.py
https://github.com/samuel-phan/mssh-copy-id/blob/59c50eabb74c4e0eeb729266df57c285e6661b0b/msshcopyid/utils.py#L42-L84
def parse_hosts(hosts, ssh_port=None, ssh_config=None):
    """
    Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.

    The information about the host is taken in this order of priority:

    - host:
        - from the host (string) itself.
    - user:
        - from the host (string) itself.
        - from the `paramiko.config.SSHConfig` object.
        - the currently logged-in user.
    - port:
        - from the function argument `ssh_port`.
        - from the `paramiko.config.SSHConfig` object.
        - default SSH port: 22

    :param hosts: list of hosts (string). E.g.: ['server1', 'user1@server2']
    :param ssh_port: SSH port to use in priority over the `paramiko.config.SSHConfig` object.
    :param ssh_config: a `paramiko.config.SSHConfig` object.

    :return: a list of `msshcopyid.Host` objects.
    """
    host_list = []  # list of Host objects
    current_user = getpass.getuser()
    for host in hosts:
        # host_info = {'hostname': 'server1', 'hashknownhosts': 'no', 'user': 'user1'}
        if ssh_config is not None:
            host_info = ssh_config.lookup(host)
        else:
            host_info = {}

        # hostname & user
        if '@' in host:
            user, hostname = host.split('@', 1)
        else:
            hostname = host
            user = host_info.get('user', current_user)

        # port
        port = ssh_port or host_info.get('port', DEFAULT_SSH_PORT)

        host_list.append(msshcopyid.Host(hostname=hostname, port=port, user=user))
    return host_list
[ "def", "parse_hosts", "(", "hosts", ",", "ssh_port", "=", "None", ",", "ssh_config", "=", "None", ")", ":", "host_list", "=", "[", "]", "# list of Host objects", "current_user", "=", "getpass", ".", "getuser", "(", ")", "for", "host", "in", "hosts", ":", "# host_info = {'hostname': 'server1', 'hashknownhosts': 'no', 'user': 'user1'}", "if", "ssh_config", "is", "not", "None", ":", "host_info", "=", "ssh_config", ".", "lookup", "(", "host", ")", "else", ":", "host_info", "=", "{", "}", "# hostname & user", "if", "'@'", "in", "host", ":", "user", ",", "hostname", "=", "host", ".", "split", "(", "'@'", ",", "1", ")", "else", ":", "hostname", "=", "host", "user", "=", "host_info", ".", "get", "(", "'user'", ",", "current_user", ")", "# port", "port", "=", "ssh_port", "or", "host_info", ".", "get", "(", "'port'", ",", "DEFAULT_SSH_PORT", ")", "host_list", ".", "append", "(", "msshcopyid", ".", "Host", "(", "hostname", "=", "hostname", ",", "port", "=", "port", ",", "user", "=", "user", ")", ")", "return", "host_list" ]
Parse a list of hosts (string) and return a list of `msshcopyid.Host` objects.

The information about the host is taken in this order of priority:

- host:
    - from the host (string) itself.
- user:
    - from the host (string) itself.
    - from the `paramiko.config.SSHConfig` object.
    - the currently logged-in user.
- port:
    - from the function argument `ssh_port`.
    - from the `paramiko.config.SSHConfig` object.
    - default SSH port: 22

:param hosts: list of hosts (string). E.g.: ['server1', 'user1@server2']
:param ssh_port: SSH port to use in priority over the `paramiko.config.SSHConfig` object.
:param ssh_config: a `paramiko.config.SSHConfig` object.

:return: a list of `msshcopyid.Host` objects.
[ "Parse", "a", "list", "of", "hosts", "(", "string", ")", "and", "return", "a", "list", "of", "msshcopyid", ".", "Host", "objects", "." ]
python
train
32.72093
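A short sketch of how that resolution order plays out, assuming `DEFAULT_SSH_PORT` is 22 and no SSH config object is passed:

    hosts = parse_hosts(['server1', 'alice@server2'], ssh_port=2222)
    for h in hosts:
        print(h.user, h.hostname, h.port)
    # 'server1' falls back to the currently logged-in user; 'alice@server2'
    # keeps its explicit user; both use port 2222, since ssh_port wins.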
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/postprocess/text.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/text.py#L544-L575
def replace_pattern( df, column: str, *, pat: str, repl: str, new_column: str = None, case: bool = True, regex: bool = True ): """ Replace occurrences of pattern/regex in `column` with some other string See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `pat` (*str*): character sequence or regular expression - `repl` (*str*): replacement string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used) - `case` (*boolean*): if true, case sensitive. - `regex` (*boolean*): default true """ new_column = new_column or column df.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex) return df
[ "def", "replace_pattern", "(", "df", ",", "column", ":", "str", ",", "*", ",", "pat", ":", "str", ",", "repl", ":", "str", ",", "new_column", ":", "str", "=", "None", ",", "case", ":", "bool", "=", "True", ",", "regex", ":", "bool", "=", "True", ")", ":", "new_column", "=", "new_column", "or", "column", "df", ".", "loc", "[", ":", ",", "new_column", "]", "=", "df", "[", "column", "]", ".", "str", ".", "replace", "(", "pat", ",", "repl", ",", "case", "=", "case", ",", "regex", "=", "regex", ")", "return", "df" ]
Replace occurrences of pattern/regex in `column` with some other string See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `pat` (*str*): character sequence or regular expression - `repl` (*str*): replacement string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used) - `case` (*boolean*): if true, case sensitive. - `regex` (*boolean*): default true
[ "Replace", "occurrences", "of", "pattern", "/", "regex", "in", "column", "with", "some", "other", "string", "See", "[", "pandas", "doc", "]", "(", "https", ":", "//", "pandas", ".", "pydata", ".", "org", "/", "pandas", "-", "docs", "/", "stable", "/", "reference", "/", "api", "/", "pandas", ".", "Series", ".", "str", ".", "replace", ".", "html", ")", "for", "more", "information" ]
python
test
28.40625
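A hedged usage sketch against a toy DataFrame (the column names are made up):

    import pandas as pd

    df = pd.DataFrame({'label': ['foo-bar', 'foo-baz']})
    df = replace_pattern(df, 'label', pat='-', repl='_', new_column='clean')
    print(df['clean'].tolist())  # ['foo_bar', 'foo_baz']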
geophysics-ubonn/reda
lib/reda/configs/configManager.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L675-L753
def gen_configs_permutate(self, injections_raw,
                          only_same_dipole_length=False,
                          ignore_crossed_dipoles=False,
                          silent=False):
    """
    Create measurement configurations out of a pool of current injections.
    Use only the provided dipoles for potential dipole selection. This
    means that we have always reciprocal measurements.

    Remove quadpoles where electrodes are used both as current and voltage
    dipoles.

    Parameters
    ----------
    injections_raw : Nx2 array
        current injections
    only_same_dipole_length : bool, optional
        if True, only generate permutations for the same dipole length
    ignore_crossed_dipoles : bool, optional
        If True, potential dipoles will be ignored that lie between current
        dipoles, e.g. 1-4 3-5. In this case it is possible to not have full
        normal-reciprocal coverage.
    silent: bool, optional
        if True, do not print information on ignored configs (default:
        False)

    Returns
    -------
    configs : Nx4 array
        quadrupoles generated out of the current injections
    """
    injections = np.atleast_2d(injections_raw).astype(int)
    N = injections.shape[0]

    measurements = []

    for injection in range(0, N):
        dipole_length = np.abs(injections[injection][1] -
                               injections[injection][0])

        # select all dipole EXCEPT for the injection dipole
        for i in set(range(0, N)) - set([injection]):
            test_dipole_length = np.abs(injections[i, :][1] -
                                        injections[i, :][0])
            if (only_same_dipole_length and
                    test_dipole_length != dipole_length):
                continue
            quadpole = np.array(
                [injections[injection, :], injections[i, :]]).flatten()
            if ignore_crossed_dipoles is True:
                # check if we need to ignore this dipole
                # Note: this could be wrong if electrode numbers are not
                # ascending!
                if (quadpole[2] > quadpole[0] and
                        quadpole[2] < quadpole[1]):
                    if not silent:
                        print('A - ignoring', quadpole)
                elif (quadpole[3] > quadpole[0] and
                        quadpole[3] < quadpole[1]):
                    if not silent:
                        print('B - ignoring', quadpole)
                else:
                    measurements.append(quadpole)
            else:
                # add every quadpole
                measurements.append(quadpole)

    # check and remove double use of electrodes
    filtered = []
    for quadpole in measurements:
        if (not set(quadpole[0:2]).isdisjoint(set(quadpole[2:4]))):
            if not silent:
                print('Ignoring quadrupole because of ',
                      'repeated electrode use:', quadpole)
        else:
            filtered.append(quadpole)
    self.add_to_configs(filtered)
    return np.array(filtered)
[ "def", "gen_configs_permutate", "(", "self", ",", "injections_raw", ",", "only_same_dipole_length", "=", "False", ",", "ignore_crossed_dipoles", "=", "False", ",", "silent", "=", "False", ")", ":", "injections", "=", "np", ".", "atleast_2d", "(", "injections_raw", ")", ".", "astype", "(", "int", ")", "N", "=", "injections", ".", "shape", "[", "0", "]", "measurements", "=", "[", "]", "for", "injection", "in", "range", "(", "0", ",", "N", ")", ":", "dipole_length", "=", "np", ".", "abs", "(", "injections", "[", "injection", "]", "[", "1", "]", "-", "injections", "[", "injection", "]", "[", "0", "]", ")", "# select all dipole EXCEPT for the injection dipole", "for", "i", "in", "set", "(", "range", "(", "0", ",", "N", ")", ")", "-", "set", "(", "[", "injection", "]", ")", ":", "test_dipole_length", "=", "np", ".", "abs", "(", "injections", "[", "i", ",", ":", "]", "[", "1", "]", "-", "injections", "[", "i", ",", ":", "]", "[", "0", "]", ")", "if", "(", "only_same_dipole_length", "and", "test_dipole_length", "!=", "dipole_length", ")", ":", "continue", "quadpole", "=", "np", ".", "array", "(", "[", "injections", "[", "injection", ",", ":", "]", ",", "injections", "[", "i", ",", ":", "]", "]", ")", ".", "flatten", "(", ")", "if", "ignore_crossed_dipoles", "is", "True", ":", "# check if we need to ignore this dipole", "# Note: this could be wrong if electrode number are not", "# ascending!", "if", "(", "quadpole", "[", "2", "]", ">", "quadpole", "[", "0", "]", "and", "quadpole", "[", "2", "]", "<", "quadpole", "[", "1", "]", ")", ":", "if", "not", "silent", ":", "print", "(", "'A - ignoring'", ",", "quadpole", ")", "elif", "(", "quadpole", "[", "3", "]", ">", "quadpole", "[", "0", "]", "and", "quadpole", "[", "3", "]", "<", "quadpole", "[", "1", "]", ")", ":", "if", "not", "silent", ":", "print", "(", "'B - ignoring'", ",", "quadpole", ")", "else", ":", "measurements", ".", "append", "(", "quadpole", ")", "else", ":", "# add very quadpole", "measurements", ".", "append", "(", "quadpole", ")", "# check and remove double use of electrodes", "filtered", "=", "[", "]", "for", "quadpole", "in", "measurements", ":", "if", "(", "not", "set", "(", "quadpole", "[", "0", ":", "2", "]", ")", ".", "isdisjoint", "(", "set", "(", "quadpole", "[", "2", ":", "4", "]", ")", ")", ")", ":", "if", "not", "silent", ":", "print", "(", "'Ignoring quadrupole because of '", ",", "'repeated electrode use:'", ",", "quadpole", ")", "else", ":", "filtered", ".", "append", "(", "quadpole", ")", "self", ".", "add_to_configs", "(", "filtered", ")", "return", "np", ".", "array", "(", "filtered", ")" ]
Create measurement configurations out of a pool of current injections. Use only the provided dipoles for potential dipole selection. This means that we have always reciprocal measurements. Remove quadpoles where electrodes are used both as current and voltage dipoles. Parameters ---------- injections_raw : Nx2 array current injections only_same_dipole_length : bool, optional if True, only generate permutations for the same dipole length ignore_crossed_dipoles : bool, optional If True, potential dipoles will be ignored that lie between current dipoles, e.g. 1-4 3-5. In this case it is possible to not have full normal-reciprocal coverage. silent: bool, optional if True, do not print information on ignored configs (default: False) Returns ------- configs : Nx4 array quadrupoles generated out of the current injections
[ "Create", "measurement", "configurations", "out", "of", "a", "pool", "of", "current", "injections", ".", "Use", "only", "the", "provided", "dipoles", "for", "potential", "dipole", "selection", ".", "This", "means", "that", "we", "have", "always", "reciprocal", "measurements", "." ]
python
train
41.949367
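A small sketch of the permutation logic, assuming `cfg` is a ConfigManager instance from this module:

    import numpy as np

    injections = np.array([[1, 2], [3, 4], [5, 6]])
    quadpoles = cfg.gen_configs_permutate(injections, silent=True)
    # each of the 3 dipoles is paired with the other 2 as potential dipoles,
    # and no electrodes overlap here, so quadpoles.shape == (6, 4)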
xtuml/pyxtuml
bridgepoint/oal.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L1523-L1529
def p_unrelate_statement_using_1(self, p): '''statement : UNRELATE instance_name FROM instance_name ACROSS rel_id USING instance_name''' p[0] = UnrelateUsingNode(from_variable_name=p[2], to_variable_name=p[4], rel_id=p[6], phrase=None, using_variable_name=p[8])
[ "def", "p_unrelate_statement_using_1", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "UnrelateUsingNode", "(", "from_variable_name", "=", "p", "[", "2", "]", ",", "to_variable_name", "=", "p", "[", "4", "]", ",", "rel_id", "=", "p", "[", "6", "]", ",", "phrase", "=", "None", ",", "using_variable_name", "=", "p", "[", "8", "]", ")" ]
statement : UNRELATE instance_name FROM instance_name ACROSS rel_id USING instance_name
[ "statement", ":", "UNRELATE", "instance_name", "FROM", "instance_name", "ACROSS", "rel_id", "USING", "instance_name" ]
python
test
57.571429
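For reference, a hypothetical OAL statement that this grammar production would match (the identifiers and association number are made up):

    unrelate dog from owner across R1 using leash;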
475Cumulus/TBone
tbone/db/models.py
https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/db/models.py#L245-L285
async def update(self, db=None, data=None):
    ''' Update the entire document by replacing its content with new data, retaining its primary key '''
    db = db or self.db
    if data:
        # update model explicitly with a new data structure
        # merge the current model's data with the new data
        self.import_data(data)
        # prepare data for database update
        data = self.prepare_data()
        # data = {x: ndata[x] for x in ndata if x in data or x == self.primary_key}
    else:
        data = self.export_data(native=True)
    if self.primary_key not in data or data[self.primary_key] is None:
        raise Exception('Missing object primary key')

    query = {self.primary_key: self.pk}
    for i in self.connection_retries():
        try:
            result = await db[self.get_collection_name()].find_one_and_replace(
                filter=query,
                replacement=data,
                return_document=ReturnDocument.AFTER
            )
            if result:
                updated_obj = self.create_model(result)
                updated_obj._db = db
                # emit post save
                asyncio.ensure_future(post_save.send(
                    sender=self.__class__,
                    db=db,
                    instance=updated_obj,
                    created=False)
                )
                return updated_obj
            return None
        except ConnectionFailure as ex:
            exceed = await self.check_reconnect_tries_and_wait(i, 'update')
            if exceed:
                raise ex
[ "async", "def", "update", "(", "self", ",", "db", "=", "None", ",", "data", "=", "None", ")", ":", "db", "=", "db", "or", "self", ".", "db", "if", "data", ":", "# update model explicitely with a new data structure", "# merge the current model's data with the new data", "self", ".", "import_data", "(", "data", ")", "# prepare data for database update", "data", "=", "self", ".", "prepare_data", "(", ")", "# data = {x: ndata[x] for x in ndata if x in data or x == self.primary_key}", "else", ":", "data", "=", "self", ".", "export_data", "(", "native", "=", "True", ")", "if", "self", ".", "primary_key", "not", "in", "data", "or", "data", "[", "self", ".", "primary_key", "]", "is", "None", ":", "raise", "Exception", "(", "'Missing object primary key'", ")", "query", "=", "{", "self", ".", "primary_key", ":", "self", ".", "pk", "}", "for", "i", "in", "self", ".", "connection_retries", "(", ")", ":", "try", ":", "result", "=", "await", "db", "[", "self", ".", "get_collection_name", "(", ")", "]", ".", "find_one_and_replace", "(", "filter", "=", "query", ",", "replacement", "=", "data", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "if", "result", ":", "updated_obj", "=", "self", ".", "create_model", "(", "result", ")", "updated_obj", ".", "_db", "=", "db", "# emit post save", "asyncio", ".", "ensure_future", "(", "post_save", ".", "send", "(", "sender", "=", "self", ".", "__class__", ",", "db", "=", "db", ",", "instance", "=", "updated_obj", ",", "created", "=", "False", ")", ")", "return", "updated_obj", "return", "None", "except", "ConnectionFailure", "as", "ex", ":", "exceed", "=", "await", "self", ".", "check_reconnect_tries_and_wait", "(", "i", ",", "'update'", ")", "if", "exceed", ":", "raise", "ex" ]
Update the entire document by replacing its content with new data, retaining its primary key
[ "Update", "the", "entire", "document", "by", "replacing", "its", "content", "with", "new", "data", "retaining", "its", "primary", "key" ]
python
train
41.146341
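A hedged usage sketch, assuming `db` is a Motor database handle and `user` is a previously saved model instance:

    import asyncio

    async def rename(db, user):
        # replaces the stored document, keeping the primary key
        return await user.update(db=db, data={'name': 'new name'})

    # asyncio.get_event_loop().run_until_complete(rename(db, user))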
wandb/client
wandb/apis/public.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/public.py#L115-L141
def _parse_path(self, path):
    """Parses paths in the following formats:

    url: username/project/runs/run_id
    path: username/project/run_id
    docker: username/project:run_id

    username is optional and will fall back to the currently logged-in user.
    """
    run = self.settings['run']
    project = self.settings['project']
    username = self.settings['username']
    parts = path.replace("/runs/", "/").split("/")
    if ":" in parts[-1]:
        run = parts[-1].split(":")[-1]
        parts[-1] = parts[-1].split(":")[0]
    elif parts[-1]:
        run = parts[-1]
    if len(parts) > 1:
        project = parts[1]
        if username and run == project:
            project = parts[0]
        else:
            username = parts[0]
    else:
        project = parts[0]
    return (username, project, run)
[ "def", "_parse_path", "(", "self", ",", "path", ")", ":", "run", "=", "self", ".", "settings", "[", "'run'", "]", "project", "=", "self", ".", "settings", "[", "'project'", "]", "username", "=", "self", ".", "settings", "[", "'username'", "]", "parts", "=", "path", ".", "replace", "(", "\"/runs/\"", ",", "\"/\"", ")", ".", "split", "(", "\"/\"", ")", "if", "\":\"", "in", "parts", "[", "-", "1", "]", ":", "run", "=", "parts", "[", "-", "1", "]", ".", "split", "(", "\":\"", ")", "[", "-", "1", "]", "parts", "[", "-", "1", "]", "=", "parts", "[", "-", "1", "]", ".", "split", "(", "\":\"", ")", "[", "0", "]", "elif", "parts", "[", "-", "1", "]", ":", "run", "=", "parts", "[", "-", "1", "]", "if", "len", "(", "parts", ")", ">", "1", ":", "project", "=", "parts", "[", "1", "]", "if", "username", "and", "run", "==", "project", ":", "project", "=", "parts", "[", "0", "]", "else", ":", "username", "=", "parts", "[", "0", "]", "else", ":", "project", "=", "parts", "[", "0", "]", "return", "(", "username", ",", "project", ",", "run", ")" ]
Parses paths in the following formats:

url: username/project/runs/run_id
path: username/project/run_id
docker: username/project:run_id

username is optional and will fall back to the currently logged-in user.
[ "Parses", "paths", "in", "the", "following", "formats", ":" ]
python
train
32.962963
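A sketch of the three accepted forms, assuming `api` is an Api instance (the method is private, so this is illustrative only):

    api._parse_path('ann/my-project/runs/abc123')  # ('ann', 'my-project', 'abc123')
    api._parse_path('ann/my-project/abc123')       # same result, path form
    api._parse_path('ann/my-project:abc123')       # same result, docker form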
apache/airflow
airflow/hooks/S3_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L233-L277
def select_key(self, key, bucket_name=None, expression='SELECT * FROM S3Object', expression_type='SQL', input_serialization=None, output_serialization=None): """ Reads a key with S3 Select. :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which the file is stored :type bucket_name: str :param expression: S3 Select expression :type expression: str :param expression_type: S3 Select expression type :type expression_type: str :param input_serialization: S3 Select input data serialization format :type input_serialization: dict :param output_serialization: S3 Select output data serialization format :type output_serialization: dict :return: retrieved subset of original data by S3 Select :rtype: str .. seealso:: For more details about S3 Select parameters: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content """ if input_serialization is None: input_serialization = {'CSV': {}} if output_serialization is None: output_serialization = {'CSV': {}} if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) response = self.get_conn().select_object_content( Bucket=bucket_name, Key=key, Expression=expression, ExpressionType=expression_type, InputSerialization=input_serialization, OutputSerialization=output_serialization) return ''.join(event['Records']['Payload'].decode('utf-8') for event in response['Payload'] if 'Records' in event)
[ "def", "select_key", "(", "self", ",", "key", ",", "bucket_name", "=", "None", ",", "expression", "=", "'SELECT * FROM S3Object'", ",", "expression_type", "=", "'SQL'", ",", "input_serialization", "=", "None", ",", "output_serialization", "=", "None", ")", ":", "if", "input_serialization", "is", "None", ":", "input_serialization", "=", "{", "'CSV'", ":", "{", "}", "}", "if", "output_serialization", "is", "None", ":", "output_serialization", "=", "{", "'CSV'", ":", "{", "}", "}", "if", "not", "bucket_name", ":", "(", "bucket_name", ",", "key", ")", "=", "self", ".", "parse_s3_url", "(", "key", ")", "response", "=", "self", ".", "get_conn", "(", ")", ".", "select_object_content", "(", "Bucket", "=", "bucket_name", ",", "Key", "=", "key", ",", "Expression", "=", "expression", ",", "ExpressionType", "=", "expression_type", ",", "InputSerialization", "=", "input_serialization", ",", "OutputSerialization", "=", "output_serialization", ")", "return", "''", ".", "join", "(", "event", "[", "'Records'", "]", "[", "'Payload'", "]", ".", "decode", "(", "'utf-8'", ")", "for", "event", "in", "response", "[", "'Payload'", "]", "if", "'Records'", "in", "event", ")" ]
Reads a key with S3 Select. :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which the file is stored :type bucket_name: str :param expression: S3 Select expression :type expression: str :param expression_type: S3 Select expression type :type expression_type: str :param input_serialization: S3 Select input data serialization format :type input_serialization: dict :param output_serialization: S3 Select output data serialization format :type output_serialization: dict :return: retrieved subset of original data by S3 Select :rtype: str .. seealso:: For more details about S3 Select parameters: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
[ "Reads", "a", "key", "with", "S3", "Select", "." ]
python
test
40.911111
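A hedged usage sketch; the connection id, bucket, and key are made up:

    from airflow.hooks.S3_hook import S3Hook

    hook = S3Hook(aws_conn_id='aws_default')
    subset = hook.select_key(
        key='s3://my-bucket/data.csv',
        expression="SELECT s._1 FROM S3Object s",
    )  # by default both input and output are serialized as CSV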
csparpa/pyowm
pyowm/stationsapi30/station_parser.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/stationsapi30/station_parser.py#L34-L63
def parse_JSON(self, JSON_string):
    """
    Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
    data.

    :param JSON_string: a raw JSON string
    :type JSON_string: str
    :return: a *pyowm.stationsapi30.station.Station* instance or ``None``
        if no data is available
    :raises: *ParseResponseError* if it is impossible to find or parse the
        data needed to build the result

    """
    if JSON_string is None:
        raise parse_response_error.ParseResponseError('JSON data is None')
    d = json.loads(JSON_string)
    try:
        id = d.get('ID', None) or d.get('id', None)
        external_id = d.get('external_id', None)
        lon = d.get('longitude', None)
        lat = d.get('latitude', None)
        alt = d.get('altitude', None)
    except KeyError as e:
        raise parse_response_error.ParseResponseError('Impossible to parse JSON: %s' % e)
    name = d.get('name', None)
    rank = d.get('rank', None)
    created_at = d.get('created_at', None)
    updated_at = d.get('updated_at', None)
    return Station(id, created_at, updated_at, external_id, name,
                   lon, lat, alt, rank)
[ "def", "parse_JSON", "(", "self", ",", "JSON_string", ")", ":", "if", "JSON_string", "is", "None", ":", "raise", "parse_response_error", ".", "ParseResponseError", "(", "'JSON data is None'", ")", "d", "=", "json", ".", "loads", "(", "JSON_string", ")", "try", ":", "id", "=", "d", ".", "get", "(", "'ID'", ",", "None", ")", "or", "d", ".", "get", "(", "'id'", ",", "None", ")", "external_id", "=", "d", ".", "get", "(", "'external_id'", ",", "None", ")", "lon", "=", "d", ".", "get", "(", "'longitude'", ",", "None", ")", "lat", "=", "d", ".", "get", "(", "'latitude'", ",", "None", ")", "alt", "=", "d", ".", "get", "(", "'altitude'", ",", "None", ")", "except", "KeyError", "as", "e", ":", "raise", "parse_response_error", ".", "ParseResponseError", "(", "'Impossible to parse JSON: %s'", "%", "e", ")", "name", "=", "d", ".", "get", "(", "'name'", ",", "None", ")", "rank", "=", "d", ".", "get", "(", "'rank'", ",", "None", ")", "created_at", "=", "d", ".", "get", "(", "'created_at'", ",", "None", ")", "updated_at", "=", "d", ".", "get", "(", "'updated_at'", ",", "None", ")", "return", "Station", "(", "id", ",", "created_at", ",", "updated_at", ",", "external_id", ",", "name", ",", "lon", ",", "lat", ",", "alt", ",", "rank", ")" ]
Parses a *pyowm.stationsapi30.station.Station* instance out of raw JSON
data.

:param JSON_string: a raw JSON string
:type JSON_string: str
:return: a *pyowm.stationsapi30.station.Station* instance or ``None``
    if no data is available
:raises: *ParseResponseError* if it is impossible to find or parse the
    data needed to build the result
[ "Parses", "a", "*", "pyowm", ".", "stationsapi30", ".", "station", ".", "Station", "*", "instance", "out", "of", "raw", "JSON", "data", "." ]
python
train
41.266667
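A minimal sketch, assuming the parser class in this module is `StationParser` and that `Station` exposes its constructor arguments as attributes:

    raw = ('{"id": 12345, "external_id": "SF_TEST001", "name": "SF Test",'
           ' "longitude": -122.68, "latitude": 37.76, "altitude": 150, "rank": 10}')
    station = StationParser().parse_JSON(raw)
    print(station.name)  # 'SF Test'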
midasplatform/pydas
pydas/api.py
https://github.com/midasplatform/pydas/blob/e5f9e96e754fb2dc5da187b05e4abc77a9b2affd/pydas/api.py#L331-L346
def _create_folder(local_folder, parent_folder_id): """ Function for creating a remote folder and returning the id. This should be a building block for user-level functions. :param local_folder: full path to a local folder :type local_folder: string :param parent_folder_id: id of parent folder on the Midas Server instance, where the new folder will be added :type parent_folder_id: int | long :returns: id of the remote folder that was created :rtype: int | long """ new_folder = session.communicator.create_folder( session.token, os.path.basename(local_folder), parent_folder_id) return new_folder['folder_id']
[ "def", "_create_folder", "(", "local_folder", ",", "parent_folder_id", ")", ":", "new_folder", "=", "session", ".", "communicator", ".", "create_folder", "(", "session", ".", "token", ",", "os", ".", "path", ".", "basename", "(", "local_folder", ")", ",", "parent_folder_id", ")", "return", "new_folder", "[", "'folder_id'", "]" ]
Function for creating a remote folder and returning the id. This should be a building block for user-level functions. :param local_folder: full path to a local folder :type local_folder: string :param parent_folder_id: id of parent folder on the Midas Server instance, where the new folder will be added :type parent_folder_id: int | long :returns: id of the remote folder that was created :rtype: int | long
[ "Function", "for", "creating", "a", "remote", "folder", "and", "returning", "the", "id", ".", "This", "should", "be", "a", "building", "block", "for", "user", "-", "level", "functions", "." ]
python
valid
41.4375
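A hedged sketch of the helper (it is private and assumes an authenticated pydas `session`); the path and parent id are made up:

    folder_id = _create_folder('/home/me/experiments', parent_folder_id=42)
    # creates a remote folder named 'experiments' under folder 42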
ebroecker/canmatrix
src/canmatrix/formats/arxml.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/formats/arxml.py#L942-L1171
def get_signals(signal_array, frame, root_or_cache, ns, multiplex_id, float_factory):
    # type: (typing.Sequence[_Element], canmatrix.Frame, _DocRoot, str, _MultiplexId, typing.Callable) -> None
    """Add signals from xml to the Frame."""
    global signal_rxs
    group_id = 1
    if signal_array is None:  # Empty signalarray - nothing to do
        return
    for signal in signal_array:
        compu_method = None
        motorola = get_child(signal, "PACKING-BYTE-ORDER", root_or_cache, ns)
        start_bit = get_child(signal, "START-POSITION", root_or_cache, ns)

        isignal = get_child(signal, "SIGNAL", root_or_cache, ns)
        if isignal is None:
            isignal = get_child(signal, "I-SIGNAL", root_or_cache, ns)
        if isignal is None:
            isignal = get_child(signal, "I-SIGNAL-GROUP", root_or_cache, ns)
            if isignal is not None:
                logger.debug("get_signals: found I-SIGNAL-GROUP ")
                isignal_array = find_children_by_path(isignal, "I-SIGNAL", root_or_cache, ns)
                get_sys_signals(isignal, isignal_array, frame, group_id, ns)
                group_id = group_id + 1
                continue
        if isignal is None:
            logger.debug(
                'Frame %s, no isignal for %s found',
                frame.name, get_child(signal, "SHORT-NAME", root_or_cache, ns).text)

        base_type = get_child(isignal, "BASE-TYPE", root_or_cache, ns)
        signal_name = None  # type: typing.Optional[str]
        signal_name_elem = get_child(isignal, "LONG-NAME", root_or_cache, ns)
        if signal_name_elem is not None:
            signal_name_elem = get_child(signal_name_elem, "L-4", root_or_cache, ns)
            if signal_name_elem is not None:
                signal_name = signal_name_elem.text

        system_signal = get_child(isignal, "SYSTEM-SIGNAL", root_or_cache, ns)
        if system_signal is None:
            logger.debug('Frame %s, signal %s has no system-signal', frame.name, isignal.tag)

        if "SYSTEM-SIGNAL-GROUP" in system_signal.tag:
            system_signals = find_children_by_path(system_signal, "SYSTEM-SIGNAL-REFS/SYSTEM-SIGNAL", root_or_cache, ns)
            get_sys_signals(system_signal, system_signals, frame, group_id, ns)
            group_id = group_id + 1
            continue

        length = get_child(isignal, "LENGTH", root_or_cache, ns)
        if length is None:
            length = get_child(system_signal, "LENGTH", root_or_cache, ns)

        name = get_child(system_signal, "SHORT-NAME", root_or_cache, ns)
        unit_element = get_child(isignal, "UNIT", root_or_cache, ns)
        display_name = get_child(unit_element, "DISPLAY-NAME", root_or_cache, ns)
        if display_name is not None:
            signal_unit = display_name.text
        else:
            signal_unit = ""

        signal_min = None  # type: canmatrix.types.OptionalPhysicalValue
        signal_max = None  # type: canmatrix.types.OptionalPhysicalValue
        receiver = []  # type: typing.List[str]

        signal_description = get_element_desc(system_signal, root_or_cache, ns)

        datatype = get_child(system_signal, "DATA-TYPE", root_or_cache, ns)
        if datatype is None:  # AR4?
            data_constr = None
            compu_method = None
            base_type = None
            for test_signal in [isignal, system_signal]:
                if data_constr is None:
                    data_constr = get_child(test_signal, "DATA-CONSTR", root_or_cache, ns)
                if compu_method is None:
                    compu_method = get_child(test_signal, "COMPU-METHOD", root_or_cache, ns)
                if base_type is None:
                    base_type = get_child(test_signal, "BASE-TYPE", root_or_cache, ns)
            lower = get_child(data_constr, "LOWER-LIMIT", root_or_cache, ns)
            upper = get_child(data_constr, "UPPER-LIMIT", root_or_cache, ns)
            encoding = None  # TODO - find encoding in AR4
        else:
            lower = get_child(datatype, "LOWER-LIMIT", root_or_cache, ns)
            upper = get_child(datatype, "UPPER-LIMIT", root_or_cache, ns)
            encoding = get_child(datatype, "ENCODING", root_or_cache, ns)

        if encoding is not None and (encoding.text == "SINGLE" or encoding.text == "DOUBLE"):
            is_float = True
        else:
            is_float = False

        if lower is not None and upper is not None:
            signal_min = float_factory(lower.text)
            signal_max = float_factory(upper.text)

        datdefprops = get_child(datatype, "SW-DATA-DEF-PROPS", root_or_cache, ns)

        if compu_method is None:
            compu_method = get_child(datdefprops, "COMPU-METHOD", root_or_cache, ns)

        if compu_method is None:  # AR4
            compu_method = get_child(isignal, "COMPU-METHOD", root_or_cache, ns)
            base_type = get_child(isignal, "BASE-TYPE", root_or_cache, ns)
            encoding = get_child(base_type, "BASE-TYPE-ENCODING", root_or_cache, ns)
            if encoding is not None and encoding.text == "IEEE754":
                is_float = True
        if compu_method is None:
            logger.debug('No Compmethod found!! - try alternate scheme 1.')
            networkrep = get_child(isignal, "NETWORK-REPRESENTATION-PROPS", root_or_cache, ns)
            data_def_props_var = get_child(networkrep, "SW-DATA-DEF-PROPS-VARIANTS", root_or_cache, ns)
            data_def_props_cond = get_child(data_def_props_var, "SW-DATA-DEF-PROPS-CONDITIONAL", root_or_cache, ns)
            if data_def_props_cond is not None:
                try:
                    compu_method = get_child(data_def_props_cond, "COMPU-METHOD", root_or_cache, ns)
                except:
                    logger.debug('No valid compu method found for this - check ARXML file!!')
                    compu_method = None
        #####################################################################################################
        # no found compu-method fuzzy search in systemsignal:
        #####################################################################################################
        if compu_method is None:
            logger.debug('No Compmethod found!! - fuzzy search in syssignal.')
            compu_method = get_child(system_signal, "COMPU-METHOD", root_or_cache, ns)

        # decode compuMethod:
        (values, factor, offset, unit_elem, const) = decode_compu_method(compu_method, root_or_cache, ns, float_factory)

        if signal_min is not None:
            signal_min *= factor
            signal_min += offset
        if signal_max is not None:
            signal_max *= factor
            signal_max += offset

        if base_type is None:
            base_type = get_child(datdefprops, "BASE-TYPE", root_or_cache, ns)
        if base_type is not None:
            type_name = get_element_name(base_type, ns)
            if type_name[0] == 'u':
                is_signed = False  # unsigned
            else:
                is_signed = True  # signed
        else:
            is_signed = True  # signed

        if unit_elem is not None:
            longname = get_child(unit_elem, "LONG-NAME", root_or_cache, ns)
            #####################################################################################################
            # Modification to support obtaining the Signals Unit by DISPLAY-NAME. 07June16
            #####################################################################################################
            display_name = None
            try:
                display_name = get_child(unit_elem, "DISPLAY-NAME", root_or_cache, ns)
            except:
                logger.debug('No Unit Display name found!! - using long name')
            if display_name is not None:
                signal_unit = display_name.text
            else:
                l4 = get_child(longname, "L-4", root_or_cache, ns)
                if l4 is not None:
                    signal_unit = l4.text

        init_list = find_children_by_path(system_signal, "INIT-VALUE/VALUE", root_or_cache, ns)

        if not init_list:
            init_list = find_children_by_path(isignal, "INIT-VALUE/NUMERICAL-VALUE-SPECIFICATION/VALUE", root_or_cache, ns)  # #AR4.2
        if init_list:
            initvalue = init_list[0]
        else:
            initvalue = None

        is_little_endian = False
        if motorola is not None:
            if motorola.text == 'MOST-SIGNIFICANT-BYTE-LAST':
                is_little_endian = True
        else:
            logger.debug('no byte order for signal ' + name.text)

        if name is None:
            logger.debug('no name for signal given')
        if start_bit is None:
            logger.debug('no startBit for signal given')
        if length is None:
            logger.debug('no length for signal given')

        if start_bit is not None:
            new_signal = canmatrix.Signal(
                name.text,
                start_bit=int(start_bit.text),
                size=int(length.text),
                is_little_endian=is_little_endian,
                is_signed=is_signed,
                factor=factor,
                offset=offset,
                unit=signal_unit,
                receivers=receiver,
                multiplex=multiplex_id,
                comment=signal_description,
                is_float=is_float)

            if signal_min is not None:
                new_signal.min = signal_min
            if signal_max is not None:
                new_signal.max = signal_max

            if new_signal.is_little_endian == 0:
                # startbit of motorola coded signals are MSB in arxml
                new_signal.set_startbit(int(start_bit.text), bitNumbering=1)

            # save signal, to determine receiver-ECUs for this signal later
            signal_rxs[system_signal] = new_signal

            if base_type is not None:
                temp = get_child(base_type, "SHORT-NAME", root_or_cache, ns)
                if temp is not None and "boolean" == temp.text:
                    new_signal.add_values(1, "TRUE")
                    new_signal.add_values(0, "FALSE")

            if initvalue is not None and initvalue.text is not None:
                initvalue.text = canmatrix.utils.guess_value(initvalue.text)
                new_signal._initValue = float_factory(initvalue.text)
                new_signal.add_attribute("GenSigStartValue", str(new_signal._initValue))
            else:
                new_signal._initValue = 0

            for key, value in list(values.items()):
                new_signal.add_values(key, value)
            if signal_name is not None:
                new_signal.add_attribute("LongName", signal_name)
            frame.add_signal(new_signal)
[ "def", "get_signals", "(", "signal_array", ",", "frame", ",", "root_or_cache", ",", "ns", ",", "multiplex_id", ",", "float_factory", ")", ":", "# type: (typing.Sequence[_Element], canmatrix.Frame, _DocRoot, str, _MultiplexId, typing.Callable) -> None", "global", "signal_rxs", "group_id", "=", "1", "if", "signal_array", "is", "None", ":", "# Empty signalarray - nothing to do", "return", "for", "signal", "in", "signal_array", ":", "compu_method", "=", "None", "motorola", "=", "get_child", "(", "signal", ",", "\"PACKING-BYTE-ORDER\"", ",", "root_or_cache", ",", "ns", ")", "start_bit", "=", "get_child", "(", "signal", ",", "\"START-POSITION\"", ",", "root_or_cache", ",", "ns", ")", "isignal", "=", "get_child", "(", "signal", ",", "\"SIGNAL\"", ",", "root_or_cache", ",", "ns", ")", "if", "isignal", "is", "None", ":", "isignal", "=", "get_child", "(", "signal", ",", "\"I-SIGNAL\"", ",", "root_or_cache", ",", "ns", ")", "if", "isignal", "is", "None", ":", "isignal", "=", "get_child", "(", "signal", ",", "\"I-SIGNAL-GROUP\"", ",", "root_or_cache", ",", "ns", ")", "if", "isignal", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"get_signals: found I-SIGNAL-GROUP \"", ")", "isignal_array", "=", "find_children_by_path", "(", "isignal", ",", "\"I-SIGNAL\"", ",", "root_or_cache", ",", "ns", ")", "get_sys_signals", "(", "isignal", ",", "isignal_array", ",", "frame", ",", "group_id", ",", "ns", ")", "group_id", "=", "group_id", "+", "1", "continue", "if", "isignal", "is", "None", ":", "logger", ".", "debug", "(", "'Frame %s, no isignal for %s found'", ",", "frame", ".", "name", ",", "get_child", "(", "signal", ",", "\"SHORT-NAME\"", ",", "root_or_cache", ",", "ns", ")", ".", "text", ")", "base_type", "=", "get_child", "(", "isignal", ",", "\"BASE-TYPE\"", ",", "root_or_cache", ",", "ns", ")", "signal_name", "=", "None", "# type: typing.Optional[str]", "signal_name_elem", "=", "get_child", "(", "isignal", ",", "\"LONG-NAME\"", ",", "root_or_cache", ",", "ns", ")", "if", "signal_name_elem", "is", "not", "None", ":", "signal_name_elem", "=", "get_child", "(", "signal_name_elem", ",", "\"L-4\"", ",", "root_or_cache", ",", "ns", ")", "if", "signal_name_elem", "is", "not", "None", ":", "signal_name", "=", "signal_name_elem", ".", "text", "system_signal", "=", "get_child", "(", "isignal", ",", "\"SYSTEM-SIGNAL\"", ",", "root_or_cache", ",", "ns", ")", "if", "system_signal", "is", "None", ":", "logger", ".", "debug", "(", "'Frame %s, signal %s has no system-signal'", ",", "frame", ".", "name", ",", "isignal", ".", "tag", ")", "if", "\"SYSTEM-SIGNAL-GROUP\"", "in", "system_signal", ".", "tag", ":", "system_signals", "=", "find_children_by_path", "(", "system_signal", ",", "\"SYSTEM-SIGNAL-REFS/SYSTEM-SIGNAL\"", ",", "root_or_cache", ",", "ns", ")", "get_sys_signals", "(", "system_signal", ",", "system_signals", ",", "frame", ",", "group_id", ",", "ns", ")", "group_id", "=", "group_id", "+", "1", "continue", "length", "=", "get_child", "(", "isignal", ",", "\"LENGTH\"", ",", "root_or_cache", ",", "ns", ")", "if", "length", "is", "None", ":", "length", "=", "get_child", "(", "system_signal", ",", "\"LENGTH\"", ",", "root_or_cache", ",", "ns", ")", "name", "=", "get_child", "(", "system_signal", ",", "\"SHORT-NAME\"", ",", "root_or_cache", ",", "ns", ")", "unit_element", "=", "get_child", "(", "isignal", ",", "\"UNIT\"", ",", "root_or_cache", ",", "ns", ")", "display_name", "=", "get_child", "(", "unit_element", ",", "\"DISPLAY-NAME\"", ",", "root_or_cache", ",", "ns", ")", "if", "display_name", "is", 
"not", "None", ":", "signal_unit", "=", "display_name", ".", "text", "else", ":", "signal_unit", "=", "\"\"", "signal_min", "=", "None", "# type: canmatrix.types.OptionalPhysicalValue", "signal_max", "=", "None", "# type: canmatrix.types.OptionalPhysicalValue", "receiver", "=", "[", "]", "# type: typing.List[str]", "signal_description", "=", "get_element_desc", "(", "system_signal", ",", "root_or_cache", ",", "ns", ")", "datatype", "=", "get_child", "(", "system_signal", ",", "\"DATA-TYPE\"", ",", "root_or_cache", ",", "ns", ")", "if", "datatype", "is", "None", ":", "# AR4?", "data_constr", "=", "None", "compu_method", "=", "None", "base_type", "=", "None", "for", "test_signal", "in", "[", "isignal", ",", "system_signal", "]", ":", "if", "data_constr", "is", "None", ":", "data_constr", "=", "get_child", "(", "test_signal", ",", "\"DATA-CONSTR\"", ",", "root_or_cache", ",", "ns", ")", "if", "compu_method", "is", "None", ":", "compu_method", "=", "get_child", "(", "test_signal", ",", "\"COMPU-METHOD\"", ",", "root_or_cache", ",", "ns", ")", "if", "base_type", "is", "None", ":", "base_type", "=", "get_child", "(", "test_signal", ",", "\"BASE-TYPE\"", ",", "root_or_cache", ",", "ns", ")", "lower", "=", "get_child", "(", "data_constr", ",", "\"LOWER-LIMIT\"", ",", "root_or_cache", ",", "ns", ")", "upper", "=", "get_child", "(", "data_constr", ",", "\"UPPER-LIMIT\"", ",", "root_or_cache", ",", "ns", ")", "encoding", "=", "None", "# TODO - find encoding in AR4", "else", ":", "lower", "=", "get_child", "(", "datatype", ",", "\"LOWER-LIMIT\"", ",", "root_or_cache", ",", "ns", ")", "upper", "=", "get_child", "(", "datatype", ",", "\"UPPER-LIMIT\"", ",", "root_or_cache", ",", "ns", ")", "encoding", "=", "get_child", "(", "datatype", ",", "\"ENCODING\"", ",", "root_or_cache", ",", "ns", ")", "if", "encoding", "is", "not", "None", "and", "(", "encoding", ".", "text", "==", "\"SINGLE\"", "or", "encoding", ".", "text", "==", "\"DOUBLE\"", ")", ":", "is_float", "=", "True", "else", ":", "is_float", "=", "False", "if", "lower", "is", "not", "None", "and", "upper", "is", "not", "None", ":", "signal_min", "=", "float_factory", "(", "lower", ".", "text", ")", "signal_max", "=", "float_factory", "(", "upper", ".", "text", ")", "datdefprops", "=", "get_child", "(", "datatype", ",", "\"SW-DATA-DEF-PROPS\"", ",", "root_or_cache", ",", "ns", ")", "if", "compu_method", "is", "None", ":", "compu_method", "=", "get_child", "(", "datdefprops", ",", "\"COMPU-METHOD\"", ",", "root_or_cache", ",", "ns", ")", "if", "compu_method", "is", "None", ":", "# AR4", "compu_method", "=", "get_child", "(", "isignal", ",", "\"COMPU-METHOD\"", ",", "root_or_cache", ",", "ns", ")", "base_type", "=", "get_child", "(", "isignal", ",", "\"BASE-TYPE\"", ",", "root_or_cache", ",", "ns", ")", "encoding", "=", "get_child", "(", "base_type", ",", "\"BASE-TYPE-ENCODING\"", ",", "root_or_cache", ",", "ns", ")", "if", "encoding", "is", "not", "None", "and", "encoding", ".", "text", "==", "\"IEEE754\"", ":", "is_float", "=", "True", "if", "compu_method", "is", "None", ":", "logger", ".", "debug", "(", "'No Compmethod found!! 
- try alternate scheme 1.'", ")", "networkrep", "=", "get_child", "(", "isignal", ",", "\"NETWORK-REPRESENTATION-PROPS\"", ",", "root_or_cache", ",", "ns", ")", "data_def_props_var", "=", "get_child", "(", "networkrep", ",", "\"SW-DATA-DEF-PROPS-VARIANTS\"", ",", "root_or_cache", ",", "ns", ")", "data_def_props_cond", "=", "get_child", "(", "data_def_props_var", ",", "\"SW-DATA-DEF-PROPS-CONDITIONAL\"", ",", "root_or_cache", ",", "ns", ")", "if", "data_def_props_cond", "is", "not", "None", ":", "try", ":", "compu_method", "=", "get_child", "(", "data_def_props_cond", ",", "\"COMPU-METHOD\"", ",", "root_or_cache", ",", "ns", ")", "except", ":", "logger", ".", "debug", "(", "'No valid compu method found for this - check ARXML file!!'", ")", "compu_method", "=", "None", "#####################################################################################################", "# no found compu-method fuzzy search in systemsignal:", "#####################################################################################################", "if", "compu_method", "is", "None", ":", "logger", ".", "debug", "(", "'No Compmethod found!! - fuzzy search in syssignal.'", ")", "compu_method", "=", "get_child", "(", "system_signal", ",", "\"COMPU-METHOD\"", ",", "root_or_cache", ",", "ns", ")", "# decode compuMethod:", "(", "values", ",", "factor", ",", "offset", ",", "unit_elem", ",", "const", ")", "=", "decode_compu_method", "(", "compu_method", ",", "root_or_cache", ",", "ns", ",", "float_factory", ")", "if", "signal_min", "is", "not", "None", ":", "signal_min", "*=", "factor", "signal_min", "+=", "offset", "if", "signal_max", "is", "not", "None", ":", "signal_max", "*=", "factor", "signal_max", "+=", "offset", "if", "base_type", "is", "None", ":", "base_type", "=", "get_child", "(", "datdefprops", ",", "\"BASE-TYPE\"", ",", "root_or_cache", ",", "ns", ")", "if", "base_type", "is", "not", "None", ":", "type_name", "=", "get_element_name", "(", "base_type", ",", "ns", ")", "if", "type_name", "[", "0", "]", "==", "'u'", ":", "is_signed", "=", "False", "# unsigned", "else", ":", "is_signed", "=", "True", "# signed", "else", ":", "is_signed", "=", "True", "# signed", "if", "unit_elem", "is", "not", "None", ":", "longname", "=", "get_child", "(", "unit_elem", ",", "\"LONG-NAME\"", ",", "root_or_cache", ",", "ns", ")", "#####################################################################################################", "# Modification to support obtaining the Signals Unit by DISPLAY-NAME. 07June16", "#####################################################################################################", "display_name", "=", "None", "try", ":", "display_name", "=", "get_child", "(", "unit_elem", ",", "\"DISPLAY-NAME\"", ",", "root_or_cache", ",", "ns", ")", "except", ":", "logger", ".", "debug", "(", "'No Unit Display name found!! 
- using long name'", ")", "if", "display_name", "is", "not", "None", ":", "signal_unit", "=", "display_name", ".", "text", "else", ":", "l4", "=", "get_child", "(", "longname", ",", "\"L-4\"", ",", "root_or_cache", ",", "ns", ")", "if", "l4", "is", "not", "None", ":", "signal_unit", "=", "l4", ".", "text", "init_list", "=", "find_children_by_path", "(", "system_signal", ",", "\"INIT-VALUE/VALUE\"", ",", "root_or_cache", ",", "ns", ")", "if", "not", "init_list", ":", "init_list", "=", "find_children_by_path", "(", "isignal", ",", "\"INIT-VALUE/NUMERICAL-VALUE-SPECIFICATION/VALUE\"", ",", "root_or_cache", ",", "ns", ")", "# #AR4.2", "if", "init_list", ":", "initvalue", "=", "init_list", "[", "0", "]", "else", ":", "initvalue", "=", "None", "is_little_endian", "=", "False", "if", "motorola", "is", "not", "None", ":", "if", "motorola", ".", "text", "==", "'MOST-SIGNIFICANT-BYTE-LAST'", ":", "is_little_endian", "=", "True", "else", ":", "logger", ".", "debug", "(", "'no name byte order for signal'", "+", "name", ".", "text", ")", "if", "name", "is", "None", ":", "logger", ".", "debug", "(", "'no name for signal given'", ")", "if", "start_bit", "is", "None", ":", "logger", ".", "debug", "(", "'no startBit for signal given'", ")", "if", "length", "is", "None", ":", "logger", ".", "debug", "(", "'no length for signal given'", ")", "if", "start_bit", "is", "not", "None", ":", "new_signal", "=", "canmatrix", ".", "Signal", "(", "name", ".", "text", ",", "start_bit", "=", "int", "(", "start_bit", ".", "text", ")", ",", "size", "=", "int", "(", "length", ".", "text", ")", ",", "is_little_endian", "=", "is_little_endian", ",", "is_signed", "=", "is_signed", ",", "factor", "=", "factor", ",", "offset", "=", "offset", ",", "unit", "=", "signal_unit", ",", "receivers", "=", "receiver", ",", "multiplex", "=", "multiplex_id", ",", "comment", "=", "signal_description", ",", "is_float", "=", "is_float", ")", "if", "signal_min", "is", "not", "None", ":", "new_signal", ".", "min", "=", "signal_min", "if", "signal_max", "is", "not", "None", ":", "new_signal", ".", "max", "=", "signal_max", "if", "new_signal", ".", "is_little_endian", "==", "0", ":", "# startbit of motorola coded signals are MSB in arxml", "new_signal", ".", "set_startbit", "(", "int", "(", "start_bit", ".", "text", ")", ",", "bitNumbering", "=", "1", ")", "# save signal, to determin receiver-ECUs for this signal later", "signal_rxs", "[", "system_signal", "]", "=", "new_signal", "if", "base_type", "is", "not", "None", ":", "temp", "=", "get_child", "(", "base_type", ",", "\"SHORT-NAME\"", ",", "root_or_cache", ",", "ns", ")", "if", "temp", "is", "not", "None", "and", "\"boolean\"", "==", "temp", ".", "text", ":", "new_signal", ".", "add_values", "(", "1", ",", "\"TRUE\"", ")", "new_signal", ".", "add_values", "(", "0", ",", "\"FALSE\"", ")", "if", "initvalue", "is", "not", "None", "and", "initvalue", ".", "text", "is", "not", "None", ":", "initvalue", ".", "text", "=", "canmatrix", ".", "utils", ".", "guess_value", "(", "initvalue", ".", "text", ")", "new_signal", ".", "_initValue", "=", "float_factory", "(", "initvalue", ".", "text", ")", "new_signal", ".", "add_attribute", "(", "\"GenSigStartValue\"", ",", "str", "(", "new_signal", ".", "_initValue", ")", ")", "else", ":", "new_signal", ".", "_initValue", "=", "0", "for", "key", ",", "value", "in", "list", "(", "values", ".", "items", "(", ")", ")", ":", "new_signal", ".", "add_values", "(", "key", ",", "value", ")", "if", "signal_name", "is", "not", "None", ":", "new_signal", ".", 
"add_attribute", "(", "\"LongName\"", ",", "signal_name", ")", "frame", ".", "add_signal", "(", "new_signal", ")" ]
Add signals from xml to the Frame.
[ "Add", "signals", "from", "xml", "to", "the", "Frame", "." ]
python
train
46.004348
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L169-L174
def simxSetJointPosition(clientID, jointHandle, position, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' return c_SetJointPosition(clientID, jointHandle, position, operationMode)
[ "def", "simxSetJointPosition", "(", "clientID", ",", "jointHandle", ",", "position", ",", "operationMode", ")", ":", "return", "c_SetJointPosition", "(", "clientID", ",", "jointHandle", ",", "position", ",", "operationMode", ")" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
42.166667
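A hedged call sketch against the remote API bindings; the handles would come from earlier simxStart/simxGetObjectHandle calls (not shown):

    # clientID and jointHandle are assumed to be valid ids obtained elsewhere
    err = simxSetJointPosition(clientID, jointHandle, 1.57, simx_opmode_oneshot)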
pyvisa/pyvisa
pyvisa/resources/resource.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/resources/resource.py#L281-L291
def install_handler(self, event_type, handler, user_handle=None): """Installs handlers for event callbacks in this resource. :param event_type: Logical event identifier. :param handler: Interpreted as a valid reference to a handler to be installed by a client application. :param user_handle: A value specified by an application that can be used for identifying handlers uniquely for an event type. :returns: user handle (a ctypes object) """ return self.visalib.install_visa_handler(self.session, event_type, handler, user_handle)
[ "def", "install_handler", "(", "self", ",", "event_type", ",", "handler", ",", "user_handle", "=", "None", ")", ":", "return", "self", ".", "visalib", ".", "install_visa_handler", "(", "self", ".", "session", ",", "event_type", ",", "handler", ",", "user_handle", ")" ]
Installs handlers for event callbacks in this resource. :param event_type: Logical event identifier. :param handler: Interpreted as a valid reference to a handler to be installed by a client application. :param user_handle: A value specified by an application that can be used for identifying handlers uniquely for an event type. :returns: user handle (a ctypes object)
[ "Installs", "handlers", "for", "event", "callbacks", "in", "this", "resource", "." ]
python
train
55.181818
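A hedged usage sketch with pyvisa; the resource address is made up:

    import pyvisa
    from pyvisa import constants

    def on_srq(session, event_type, context, user_handle):
        print('service request received')

    rm = pyvisa.ResourceManager()
    inst = rm.open_resource('GPIB0::12::INSTR')
    user_handle = inst.install_handler(constants.EventType.service_request, on_srq)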
peri-source/peri
peri/comp/exactpsf.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/exactpsf.py#L344-L363
def characterize_psf(self): """ Get support size and drift polynomial for current set of params """ # there may be an issue with the support and characterization-- # it might be best to do the characterization with the same support # as the calculated psf. l,u = max(self.zrange[0], self.param_dict['psf-zslab']), self.zrange[1] size_l, drift_l = self.measure_size_drift(l) size_u, drift_u = self.measure_size_drift(u) # must be odd for now or have a better system for getting the center self.support = util.oddify(2*self.support_factor*size_u.astype('int')) self.drift_poly = np.polyfit([l, u], [drift_l, drift_u], 1) if self.cutoffval is not None: psf, vec, size_l = self.psf_slice(l, size=51, zoffset=drift_l, getextent=True) psf, vec, size_u = self.psf_slice(u, size=51, zoffset=drift_u, getextent=True) ss = [np.abs(i).sum(axis=-1) for i in [size_l, size_u]] self.support = util.oddify(util.amax(*ss))
[ "def", "characterize_psf", "(", "self", ")", ":", "# there may be an issue with the support and characterization--", "# it might be best to do the characterization with the same support", "# as the calculated psf.", "l", ",", "u", "=", "max", "(", "self", ".", "zrange", "[", "0", "]", ",", "self", ".", "param_dict", "[", "'psf-zslab'", "]", ")", ",", "self", ".", "zrange", "[", "1", "]", "size_l", ",", "drift_l", "=", "self", ".", "measure_size_drift", "(", "l", ")", "size_u", ",", "drift_u", "=", "self", ".", "measure_size_drift", "(", "u", ")", "# must be odd for now or have a better system for getting the center", "self", ".", "support", "=", "util", ".", "oddify", "(", "2", "*", "self", ".", "support_factor", "*", "size_u", ".", "astype", "(", "'int'", ")", ")", "self", ".", "drift_poly", "=", "np", ".", "polyfit", "(", "[", "l", ",", "u", "]", ",", "[", "drift_l", ",", "drift_u", "]", ",", "1", ")", "if", "self", ".", "cutoffval", "is", "not", "None", ":", "psf", ",", "vec", ",", "size_l", "=", "self", ".", "psf_slice", "(", "l", ",", "size", "=", "51", ",", "zoffset", "=", "drift_l", ",", "getextent", "=", "True", ")", "psf", ",", "vec", ",", "size_u", "=", "self", ".", "psf_slice", "(", "u", ",", "size", "=", "51", ",", "zoffset", "=", "drift_u", ",", "getextent", "=", "True", ")", "ss", "=", "[", "np", ".", "abs", "(", "i", ")", ".", "sum", "(", "axis", "=", "-", "1", ")", "for", "i", "in", "[", "size_l", ",", "size_u", "]", "]", "self", ".", "support", "=", "util", ".", "oddify", "(", "util", ".", "amax", "(", "*", "ss", ")", ")" ]
Get support size and drift polynomial for current set of params
[ "Get", "support", "size", "and", "drift", "polynomial", "for", "current", "set", "of", "params" ]
python
valid
51.35
kbr/fritzconnection
fritzconnection/fritzmonitor.py
https://github.com/kbr/fritzconnection/blob/b183f759ef19dd1652371e912d36cfe34f6639ac/fritzconnection/fritzmonitor.py#L58-L70
def set_fraction(self, value): """Set the meter indicator. Value should be between 0 and 1.""" if value < 0: value *= -1 value = min(value, 1) if self.horizontal: width = int(self.width * value) height = self.height else: width = self.width height = int(self.height * value) self.canvas.coords(self.meter, self.xpos, self.ypos, self.xpos + width, self.ypos + height)
[ "def", "set_fraction", "(", "self", ",", "value", ")", ":", "if", "value", "<", "0", ":", "value", "*=", "-", "1", "value", "=", "min", "(", "value", ",", "1", ")", "if", "self", ".", "horizontal", ":", "width", "=", "int", "(", "self", ".", "width", "*", "value", ")", "height", "=", "self", ".", "height", "else", ":", "width", "=", "self", ".", "width", "height", "=", "int", "(", "self", ".", "height", "*", "value", ")", "self", ".", "canvas", ".", "coords", "(", "self", ".", "meter", ",", "self", ".", "xpos", ",", "self", ".", "ypos", ",", "self", ".", "xpos", "+", "width", ",", "self", ".", "ypos", "+", "height", ")" ]
Set the meter indicator. Value should be between 0 and 1.
[ "Set", "the", "meter", "indicator", ".", "Value", "should", "be", "between", "0", "and", "1", "." ]
python
train
37.615385
deepmind/pysc2
pysc2/lib/renderer_human.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L172-L186
def write_screen(self, font, color, screen_pos, text, align="left", valign="top"): """Write to the screen in font.size relative coordinates.""" pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize() text_surf = font.render(str(text), True, color) rect = text_surf.get_rect() if pos.x >= 0: setattr(rect, align, pos.x) else: setattr(rect, align, self.surf.get_width() + pos.x) if pos.y >= 0: setattr(rect, valign, pos.y) else: setattr(rect, valign, self.surf.get_height() + pos.y) self.surf.blit(text_surf, rect)
[ "def", "write_screen", "(", "self", ",", "font", ",", "color", ",", "screen_pos", ",", "text", ",", "align", "=", "\"left\"", ",", "valign", "=", "\"top\"", ")", ":", "pos", "=", "point", ".", "Point", "(", "*", "screen_pos", ")", "*", "point", ".", "Point", "(", "0.75", ",", "1", ")", "*", "font", ".", "get_linesize", "(", ")", "text_surf", "=", "font", ".", "render", "(", "str", "(", "text", ")", ",", "True", ",", "color", ")", "rect", "=", "text_surf", ".", "get_rect", "(", ")", "if", "pos", ".", "x", ">=", "0", ":", "setattr", "(", "rect", ",", "align", ",", "pos", ".", "x", ")", "else", ":", "setattr", "(", "rect", ",", "align", ",", "self", ".", "surf", ".", "get_width", "(", ")", "+", "pos", ".", "x", ")", "if", "pos", ".", "y", ">=", "0", ":", "setattr", "(", "rect", ",", "valign", ",", "pos", ".", "y", ")", "else", ":", "setattr", "(", "rect", ",", "valign", ",", "self", ".", "surf", ".", "get_height", "(", ")", "+", "pos", ".", "y", ")", "self", ".", "surf", ".", "blit", "(", "text_surf", ",", "rect", ")" ]
Write to the screen in font.size relative coordinates.
[ "Write", "to", "the", "screen", "in", "font", ".", "size", "relative", "coordinates", "." ]
python
train
39.8
ttu/ruuvitag-sensor
ruuvitag_sensor/ruuvi.py
https://github.com/ttu/ruuvitag-sensor/blob/b5d1367c26844ae5875b2964c68e7b2f4e1cb082/ruuvitag_sensor/ruuvi.py#L115-L130
def get_datas(callback, macs=[], run_flag=RunFlag(), bt_device=''): """ Get data for all ruuvitag sensors or sensors in the MAC's list. Args: callback (func): callback function to be called when new data is received macs (list): MAC addresses run_flag (object): RunFlag object. Function executes while run_flag.running bt_device (string): Bluetooth device id """ log.info('Get latest data for sensors. Stop with Ctrl+C.') log.info('MACs: %s', macs) for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, None, run_flag, bt_device): callback(new_data)
[ "def", "get_datas", "(", "callback", ",", "macs", "=", "[", "]", ",", "run_flag", "=", "RunFlag", "(", ")", ",", "bt_device", "=", "''", ")", ":", "log", ".", "info", "(", "'Get latest data for sensors. Stop with Ctrl+C.'", ")", "log", ".", "info", "(", "'MACs: %s'", ",", "macs", ")", "for", "new_data", "in", "RuuviTagSensor", ".", "_get_ruuvitag_datas", "(", "macs", ",", "None", ",", "run_flag", ",", "bt_device", ")", ":", "callback", "(", "new_data", ")" ]
Get data for all ruuvitag sensors or sensors in the MAC's list. Args: callback (func): callback function to be called when new data is received macs (list): MAC addresses run_flag (object): RunFlag object. Function executes while run_flag.running bt_device (string): Bluetooth device id
[ "Get", "data", "for", "all", "ruuvitag", "sensors", "or", "sensors", "in", "the", "MAC", "s", "list", "." ]
python
train
40.9375
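A minimal consumer sketch for this callback API; the handler name and MAC address are hypothetical, and the (mac, payload) unpacking follows the library's documented callback convention:

from ruuvitag_sensor.ruuvi import RuuviTagSensor

def handle_data(found_data):
    # found_data arrives as a (mac, sensor_data) pair per received advertisement
    mac, payload = found_data
    print(mac, payload)

RuuviTagSensor.get_datas(handle_data, macs=['AA:2C:6A:1E:59:3D'])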
lacava/DistanceClassifier
DistanceClassifier/DistanceClassifier.py
https://github.com/lacava/DistanceClassifier/blob/cbb8a38a82b453c5821d2a2c3328b581f62e47bc/DistanceClassifier/DistanceClassifier.py#L165-L170
def is_invertible(self,X): """checks if X is invertible""" if len(X.shape) == 2: return X.shape[0] == X.shape[1] and np.linalg.matrix_rank(X) == X.shape[0] else: return False
[ "def", "is_invertible", "(", "self", ",", "X", ")", ":", "if", "len", "(", "X", ".", "shape", ")", "==", "2", ":", "return", "X", ".", "shape", "[", "0", "]", "==", "X", ".", "shape", "[", "1", "]", "and", "np", ".", "linalg", ".", "matrix_rank", "(", "X", ")", "==", "X", ".", "shape", "[", "0", "]", "else", ":", "return", "False" ]
checks if X is invertible
[ "checks", "if", "X", "is", "invertible" ]
python
train
36.166667
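A quick check of the same rank-based test on concrete matrices:

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])   # det = -2, full rank
B = np.array([[1.0, 2.0], [2.0, 4.0]])   # second row is a multiple of the first

np.linalg.matrix_rank(A) == A.shape[0]   # True  -> invertible
np.linalg.matrix_rank(B) == B.shape[0]   # False -> singular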
jaraco/irc
irc/server.py
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/server.py#L381-L399
def handle_part(self, params): """ Handle a client parting from channel(s). """ for pchannel in params.split(','): if pchannel.strip() in self.server.channels: # Send message to all clients in all channels user is in, and # remove the user from the channels. channel = self.server.channels.get(pchannel.strip()) response = ':%s PART :%s' % (self.client_ident(), pchannel) if channel: for client in channel.clients: client.send_queue.append(response) channel.clients.remove(self) self.channels.pop(pchannel) else: _vars = self.server.servername, pchannel, pchannel response = ':%s 403 %s :%s' % _vars self.send_queue.append(response)
[ "def", "handle_part", "(", "self", ",", "params", ")", ":", "for", "pchannel", "in", "params", ".", "split", "(", "','", ")", ":", "if", "pchannel", ".", "strip", "(", ")", "in", "self", ".", "server", ".", "channels", ":", "# Send message to all clients in all channels user is in, and", "# remove the user from the channels.", "channel", "=", "self", ".", "server", ".", "channels", ".", "get", "(", "pchannel", ".", "strip", "(", ")", ")", "response", "=", "':%s PART :%s'", "%", "(", "self", ".", "client_ident", "(", ")", ",", "pchannel", ")", "if", "channel", ":", "for", "client", "in", "channel", ".", "clients", ":", "client", ".", "send_queue", ".", "append", "(", "response", ")", "channel", ".", "clients", ".", "remove", "(", "self", ")", "self", ".", "channels", ".", "pop", "(", "pchannel", ")", "else", ":", "_vars", "=", "self", ".", "server", ".", "servername", ",", "pchannel", ",", "pchannel", "response", "=", "':%s 403 %s :%s'", "%", "_vars", "self", ".", "send_queue", ".", "append", "(", "response", ")" ]
Handle a client parting from channel(s).
[ "Handle", "a", "client", "parting", "from", "channel", "(", "s", ")", "." ]
python
train
46
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L591-L605
def labels(self, leaves=True, internal=True): '''Generator over the (non-``None``) ``Node`` labels of this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` ''' if not isinstance(leaves, bool): raise TypeError("leaves must be a bool") if not isinstance(internal, bool): raise TypeError("internal must be a bool") for node in self.traverse_preorder(): if node.label is not None and ((leaves and node.is_leaf()) or (internal and not node.is_leaf())): yield node.label
[ "def", "labels", "(", "self", ",", "leaves", "=", "True", ",", "internal", "=", "True", ")", ":", "if", "not", "isinstance", "(", "leaves", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"leaves must be a bool\"", ")", "if", "not", "isinstance", "(", "internal", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"internal must be a bool\"", ")", "for", "node", "in", "self", ".", "traverse_preorder", "(", ")", ":", "if", "node", ".", "label", "is", "not", "None", "and", "(", "(", "leaves", "and", "node", ".", "is_leaf", "(", ")", ")", "or", "(", "internal", "and", "not", "node", ".", "is_leaf", "(", ")", ")", ")", ":", "yield", "node", ".", "label" ]
Generator over the (non-``None``) ``Node`` labels of this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
[ "Generator", "over", "the", "(", "non", "-", "None", ")", "Node", "labels", "of", "this", "Tree" ]
python
train
46.266667
DataBiosphere/toil
src/toil/fileStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/fileStore.py#L130-L140
def getLocalTempDir(self): """ Get a new local temporary directory in which to write files that persist for the duration of the job. :return: The absolute path to a new local temporary directory. This directory will exist for the duration of the job only, and is guaranteed to be deleted once the job terminates, removing all files it contains recursively. :rtype: str """ return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir))
[ "def", "getLocalTempDir", "(", "self", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "\"t\"", ",", "dir", "=", "self", ".", "localTempDir", ")", ")" ]
Get a new local temporary directory in which to write files that persist for the duration of the job. :return: The absolute path to a new local temporary directory. This directory will exist for the duration of the job only, and is guaranteed to be deleted once the job terminates, removing all files it contains recursively. :rtype: str
[ "Get", "a", "new", "local", "temporary", "directory", "in", "which", "to", "write", "files", "that", "persist", "for", "the", "duration", "of", "the", "job", "." ]
python
train
48.090909
quora/qcore
qcore/inspection.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/inspection.py#L215-L222
def get_subclass_tree(cls, ensure_unique=True): """Returns all subclasses (direct and recursive) of cls.""" subclasses = [] # cls.__subclasses__() fails on classes inheriting from type for subcls in type.__subclasses__(cls): subclasses.append(subcls) subclasses.extend(get_subclass_tree(subcls, ensure_unique)) return list(set(subclasses)) if ensure_unique else subclasses
[ "def", "get_subclass_tree", "(", "cls", ",", "ensure_unique", "=", "True", ")", ":", "subclasses", "=", "[", "]", "# cls.__subclasses__() fails on classes inheriting from type", "for", "subcls", "in", "type", ".", "__subclasses__", "(", "cls", ")", ":", "subclasses", ".", "append", "(", "subcls", ")", "subclasses", ".", "extend", "(", "get_subclass_tree", "(", "subcls", ",", "ensure_unique", ")", ")", "return", "list", "(", "set", "(", "subclasses", ")", ")", "if", "ensure_unique", "else", "subclasses" ]
Returns all subclasses (direct and recursive) of cls.
[ "Returns", "all", "subclasses", "(", "direct", "and", "recursive", ")", "of", "cls", "." ]
python
train
50.125
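A self-contained run of the same recursive walk over type.__subclasses__ on a toy hierarchy:

class Base(object): pass
class Mid(Base): pass
class Leaf(Mid): pass

def subclass_tree(cls):
    out = []
    for sub in type.__subclasses__(cls):
        out.append(sub)
        out.extend(subclass_tree(sub))   # recurse into each direct subclass
    return out

[c.__name__ for c in subclass_tree(Base)]   # ['Mid', 'Leaf']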
PedalPi/PluginsManager
pluginsmanager/util/restriction_list.py
https://github.com/PedalPi/PluginsManager/blob/2dcc9f6a79b48e9c9be82efffd855352fa15c5c7/pluginsmanager/util/restriction_list.py#L68-L73
def remove(self, item): """ See :meth:`~pluginsmanager.observer.observable_list.ObservableList.remove()` method """ self.real_list.remove(item) self._items.remove(item)
[ "def", "remove", "(", "self", ",", "item", ")", ":", "self", ".", "real_list", ".", "remove", "(", "item", ")", "self", ".", "_items", ".", "remove", "(", "item", ")" ]
See :meth:`~pluginsmanager.observer.observable_list.ObservableList.remove()` method
[ "See", ":", "meth", ":", "~pluginsmanager", ".", "observer", ".", "observable_list", ".", "ObservableList", ".", "remove", "()", "method" ]
python
train
33.833333
Gbps/fastlog
fastlog/term.py
https://github.com/Gbps/fastlog/blob/8edb2327d72191510302c4654ffaa1691fe31277/fastlog/term.py#L117-L131
def typeseq(types): """ Returns an escape for a terminal text formatting type, or a list of types. Valid types are: * 'i' for 'italic' * 'b' for 'bold' * 'u' for 'underline' * 'r' for 'reverse' """ ret = "" for t in types: ret += termcap.get(fmttypes[t]) return ret
[ "def", "typeseq", "(", "types", ")", ":", "ret", "=", "\"\"", "for", "t", "in", "types", ":", "ret", "+=", "termcap", ".", "get", "(", "fmttypes", "[", "t", "]", ")", "return", "ret" ]
Returns an escape for a terminal text formatting type, or a list of types. Valid types are: * 'i' for 'italic' * 'b' for 'bold' * 'u' for 'underline' * 'r' for 'reverse'
[ "Returns", "an", "escape", "for", "a", "terminal", "text", "formatting", "type", "or", "a", "list", "of", "types", "." ]
python
train
21.666667
pydata/xarray
xarray/core/combine.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/combine.py#L215-L316
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions): """ Concatenate a sequence of datasets along a new or existing dimension """ from .dataset import Dataset if compat not in ['equals', 'identical']: raise ValueError("compat=%r invalid: must be 'equals' " "or 'identical'" % compat) dim, coord = _calc_concat_dim_coord(dim) # Make sure we're working on a copy (we'll be loading variables) datasets = [ds.copy() for ds in datasets] datasets = align(*datasets, join='outer', copy=False, exclude=[dim]) concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords) def insert_result_variable(k, v): assert isinstance(v, Variable) if k in datasets[0].coords: result_coord_names.add(k) result_vars[k] = v # create the new dataset and add constant variables result_vars = OrderedDict() result_coord_names = set(datasets[0].coords) result_attrs = datasets[0].attrs result_encoding = datasets[0].encoding for k, v in datasets[0].variables.items(): if k not in concat_over: insert_result_variable(k, v) # check that global attributes and non-concatenated variables are fixed # across all datasets for ds in datasets[1:]: if (compat == 'identical' and not utils.dict_equiv(ds.attrs, result_attrs)): raise ValueError('dataset global attributes not equal') for k, v in ds.variables.items(): if k not in result_vars and k not in concat_over: raise ValueError('encountered unexpected variable %r' % k) elif (k in result_coord_names) != (k in ds.coords): raise ValueError('%r is a coordinate in some datasets but not ' 'others' % k) elif k in result_vars and k != dim: # Don't use Variable.identical as it internally invokes # Variable.equals, and we may already know the answer if compat == 'identical' and not utils.dict_equiv( v.attrs, result_vars[k].attrs): raise ValueError( 'variable %s not identical across datasets' % k) # Proceed with equals() try: # May be populated when using the "different" method is_equal = equals[k] except KeyError: result_vars[k].load() is_equal = v.equals(result_vars[k]) if not is_equal: raise ValueError( 'variable %s not equal across datasets' % k) # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables dim_lengths = [ds.dims.get(dim, 1) for ds in datasets] non_concat_dims = {} for ds in datasets: non_concat_dims.update(ds.dims) non_concat_dims.pop(dim, None) def ensure_common_dims(vars): # ensure each variable with the given name shares the same # dimensions and the same shape for all of them except along the # concat dimension common_dims = tuple(pd.unique([d for v in vars for d in v.dims])) if dim not in common_dims: common_dims = (dim,) + common_dims for var, dim_len in zip(vars, dim_lengths): if var.dims != common_dims: common_shape = tuple(non_concat_dims.get(d, dim_len) for d in common_dims) var = var.set_dims(common_dims, common_shape) yield var # stack up each variable to fill-out the dataset (in order) for k in datasets[0].variables: if k in concat_over: vars = ensure_common_dims([ds.variables[k] for ds in datasets]) combined = concat_vars(vars, dim, positions) insert_result_variable(k, combined) result = Dataset(result_vars, attrs=result_attrs) result = result.set_coords(result_coord_names) result.encoding = result_encoding if coord is not None: # add concat dimension last to ensure that its in the final Dataset result[coord.name] = coord return result
[ "def", "_dataset_concat", "(", "datasets", ",", "dim", ",", "data_vars", ",", "coords", ",", "compat", ",", "positions", ")", ":", "from", ".", "dataset", "import", "Dataset", "if", "compat", "not", "in", "[", "'equals'", ",", "'identical'", "]", ":", "raise", "ValueError", "(", "\"compat=%r invalid: must be 'equals' \"", "\"or 'identical'\"", "%", "compat", ")", "dim", ",", "coord", "=", "_calc_concat_dim_coord", "(", "dim", ")", "# Make sure we're working on a copy (we'll be loading variables)", "datasets", "=", "[", "ds", ".", "copy", "(", ")", "for", "ds", "in", "datasets", "]", "datasets", "=", "align", "(", "*", "datasets", ",", "join", "=", "'outer'", ",", "copy", "=", "False", ",", "exclude", "=", "[", "dim", "]", ")", "concat_over", ",", "equals", "=", "_calc_concat_over", "(", "datasets", ",", "dim", ",", "data_vars", ",", "coords", ")", "def", "insert_result_variable", "(", "k", ",", "v", ")", ":", "assert", "isinstance", "(", "v", ",", "Variable", ")", "if", "k", "in", "datasets", "[", "0", "]", ".", "coords", ":", "result_coord_names", ".", "add", "(", "k", ")", "result_vars", "[", "k", "]", "=", "v", "# create the new dataset and add constant variables", "result_vars", "=", "OrderedDict", "(", ")", "result_coord_names", "=", "set", "(", "datasets", "[", "0", "]", ".", "coords", ")", "result_attrs", "=", "datasets", "[", "0", "]", ".", "attrs", "result_encoding", "=", "datasets", "[", "0", "]", ".", "encoding", "for", "k", ",", "v", "in", "datasets", "[", "0", "]", ".", "variables", ".", "items", "(", ")", ":", "if", "k", "not", "in", "concat_over", ":", "insert_result_variable", "(", "k", ",", "v", ")", "# check that global attributes and non-concatenated variables are fixed", "# across all datasets", "for", "ds", "in", "datasets", "[", "1", ":", "]", ":", "if", "(", "compat", "==", "'identical'", "and", "not", "utils", ".", "dict_equiv", "(", "ds", ".", "attrs", ",", "result_attrs", ")", ")", ":", "raise", "ValueError", "(", "'dataset global attributes not equal'", ")", "for", "k", ",", "v", "in", "ds", ".", "variables", ".", "items", "(", ")", ":", "if", "k", "not", "in", "result_vars", "and", "k", "not", "in", "concat_over", ":", "raise", "ValueError", "(", "'encountered unexpected variable %r'", "%", "k", ")", "elif", "(", "k", "in", "result_coord_names", ")", "!=", "(", "k", "in", "ds", ".", "coords", ")", ":", "raise", "ValueError", "(", "'%r is a coordinate in some datasets but not '", "'others'", "%", "k", ")", "elif", "k", "in", "result_vars", "and", "k", "!=", "dim", ":", "# Don't use Variable.identical as it internally invokes", "# Variable.equals, and we may already know the answer", "if", "compat", "==", "'identical'", "and", "not", "utils", ".", "dict_equiv", "(", "v", ".", "attrs", ",", "result_vars", "[", "k", "]", ".", "attrs", ")", ":", "raise", "ValueError", "(", "'variable %s not identical across datasets'", "%", "k", ")", "# Proceed with equals()", "try", ":", "# May be populated when using the \"different\" method", "is_equal", "=", "equals", "[", "k", "]", "except", "KeyError", ":", "result_vars", "[", "k", "]", ".", "load", "(", ")", "is_equal", "=", "v", ".", "equals", "(", "result_vars", "[", "k", "]", ")", "if", "not", "is_equal", ":", "raise", "ValueError", "(", "'variable %s not equal across datasets'", "%", "k", ")", "# we've already verified everything is consistent; now, calculate", "# shared dimension sizes so we can expand the necessary variables", "dim_lengths", "=", "[", "ds", ".", "dims", ".", "get", "(", "dim", ",", "1", ")", "for", 
"ds", "in", "datasets", "]", "non_concat_dims", "=", "{", "}", "for", "ds", "in", "datasets", ":", "non_concat_dims", ".", "update", "(", "ds", ".", "dims", ")", "non_concat_dims", ".", "pop", "(", "dim", ",", "None", ")", "def", "ensure_common_dims", "(", "vars", ")", ":", "# ensure each variable with the given name shares the same", "# dimensions and the same shape for all of them except along the", "# concat dimension", "common_dims", "=", "tuple", "(", "pd", ".", "unique", "(", "[", "d", "for", "v", "in", "vars", "for", "d", "in", "v", ".", "dims", "]", ")", ")", "if", "dim", "not", "in", "common_dims", ":", "common_dims", "=", "(", "dim", ",", ")", "+", "common_dims", "for", "var", ",", "dim_len", "in", "zip", "(", "vars", ",", "dim_lengths", ")", ":", "if", "var", ".", "dims", "!=", "common_dims", ":", "common_shape", "=", "tuple", "(", "non_concat_dims", ".", "get", "(", "d", ",", "dim_len", ")", "for", "d", "in", "common_dims", ")", "var", "=", "var", ".", "set_dims", "(", "common_dims", ",", "common_shape", ")", "yield", "var", "# stack up each variable to fill-out the dataset (in order)", "for", "k", "in", "datasets", "[", "0", "]", ".", "variables", ":", "if", "k", "in", "concat_over", ":", "vars", "=", "ensure_common_dims", "(", "[", "ds", ".", "variables", "[", "k", "]", "for", "ds", "in", "datasets", "]", ")", "combined", "=", "concat_vars", "(", "vars", ",", "dim", ",", "positions", ")", "insert_result_variable", "(", "k", ",", "combined", ")", "result", "=", "Dataset", "(", "result_vars", ",", "attrs", "=", "result_attrs", ")", "result", "=", "result", ".", "set_coords", "(", "result_coord_names", ")", "result", ".", "encoding", "=", "result_encoding", "if", "coord", "is", "not", "None", ":", "# add concat dimension last to ensure that its in the final Dataset", "result", "[", "coord", ".", "name", "]", "=", "coord", "return", "result" ]
Concatenate a sequence of datasets along a new or existing dimension
[ "Concatenate", "a", "sequence", "of", "datasets", "along", "a", "new", "or", "existing", "dimension" ]
python
train
41.45098
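This internal routine backs xarray's public concat entry point; a minimal usage sketch concatenating two datasets along a new dimension:

import numpy as np
import xarray as xr

ds1 = xr.Dataset({"v": ("x", np.arange(3))})
ds2 = xr.Dataset({"v": ("x", np.arange(3, 6))})

combined = xr.concat([ds1, ds2], dim="t")   # 'v' gains a new 't' dimension
combined["v"].shape                         # (2, 3)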
KnowledgeLinks/rdfframework
rdfframework/utilities/baseutilities.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/utilities/baseutilities.py#L259-L274
def make_set(value): ''' Takes a value and turns it into a set !!!! This is important because set(string) will parse a string to individual characters vs. adding the string as an element of the set i.e. x = 'setvalue' set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'} make_set(x) = {'setvalue'} or use set([x,]) by adding string as first item in list. ''' if isinstance(value, list): value = set(value) elif not isinstance(value, set): value = set([value,]) return value
[ "def", "make_set", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "set", "(", "value", ")", "elif", "not", "isinstance", "(", "value", ",", "set", ")", ":", "value", "=", "set", "(", "[", "value", ",", "]", ")", "return", "value" ]
Takes a value and turns it into a set !!!! This is important because set(string) will parse a string to individual characters vs. adding the string as an element of the set i.e. x = 'setvalue' set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'} make_set(x) = {'setvalue'} or use set([x,]) by adding string as first item in list.
[ "Takes", "a", "value", "and", "turns", "it", "into", "a", "set" ]
python
train
33.1875
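The pitfall the docstring warns about, demonstrated directly:

x = 'setvalue'

set(x)      # {'s', 'e', 't', 'v', 'a', 'l', 'u'} -- parsed into characters
set([x])    # {'setvalue'} -- the string kept as one element, what make_set does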
hyperledger/indy-plenum
plenum/server/replica.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2577-L2587
def _request_pre_prepare(self, three_pc_key: Tuple[int, int], stash_data: Optional[Tuple[str, str, str]] = None) -> bool: """ Request preprepare """ recipients = self.primaryName return self._request_three_phase_msg(three_pc_key, self.requested_pre_prepares, PREPREPARE, recipients, stash_data)
[ "def", "_request_pre_prepare", "(", "self", ",", "three_pc_key", ":", "Tuple", "[", "int", ",", "int", "]", ",", "stash_data", ":", "Optional", "[", "Tuple", "[", "str", ",", "str", ",", "str", "]", "]", "=", "None", ")", "->", "bool", ":", "recipients", "=", "self", ".", "primaryName", "return", "self", ".", "_request_three_phase_msg", "(", "three_pc_key", ",", "self", ".", "requested_pre_prepares", ",", "PREPREPARE", ",", "recipients", ",", "stash_data", ")" ]
Request preprepare
[ "Request", "preprepare" ]
python
train
48.454545
razorpay/razorpay-python
razorpay/utility/utility.py
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/utility/utility.py#L50-L63
def compare_string(self, expected_str, actual_str): """ Returns True if the two strings are equal, False otherwise The time taken is independent of the number of characters that match For the sake of simplicity, this function executes in constant time only when the two strings have the same length. It short-circuits when they have different lengths """ if len(expected_str) != len(actual_str): return False result = 0 for x, y in zip(expected_str, actual_str): result |= ord(x) ^ ord(y) return result == 0
[ "def", "compare_string", "(", "self", ",", "expected_str", ",", "actual_str", ")", ":", "if", "len", "(", "expected_str", ")", "!=", "len", "(", "actual_str", ")", ":", "return", "False", "result", "=", "0", "for", "x", ",", "y", "in", "zip", "(", "expected_str", ",", "actual_str", ")", ":", "result", "|=", "ord", "(", "x", ")", "^", "ord", "(", "y", ")", "return", "result", "==", "0" ]
Returns True if the two strings are equal, False otherwise The time taken is independent of the number of characters that match For the sake of simplicity, this function executes in constant time only when the two strings have the same length. It short-circuits when they have different lengths
[ "Returns", "True", "if", "the", "two", "strings", "are", "equal", "False", "otherwise", "The", "time", "taken", "is", "independent", "of", "the", "number", "of", "characters", "that", "match", "For", "the", "sake", "of", "simplicity", "this", "function", "executes", "in", "constant", "time", "only", "when", "the", "two", "strings", "have", "the", "same", "length", ".", "It", "short", "-", "circuits", "when", "they", "have", "different", "lengths" ]
python
train
43.285714
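The same constant-time idea on concrete inputs; for production use, the stdlib's hmac.compare_digest implements the equivalent timing-safe comparison:

import hmac

def compare_string(expected_str, actual_str):
    if len(expected_str) != len(actual_str):
        return False
    result = 0
    for x, y in zip(expected_str, actual_str):
        result |= ord(x) ^ ord(y)   # accumulates mismatches without early exit
    return result == 0

compare_string('secret', 'secret')        # True
hmac.compare_digest('secret', 'secrex')   # False, also timing-safe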
MSchnei/pyprf_feature
pyprf_feature/simulation/pRF_functions.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L359-L485
def funcNrlTcMotPred(idxPrc, varPixX, varPixY, NrlMdlChunk, varNumTP, aryBoxCar, # aryCond path, varNumNrlMdls, varNumMtDrctn, varPar, queOut): """ Function for creating neural time course models. This function should be used to create neural models if different predictors for every motion direction are included. """ # # if hd5 method is used: open file for reading # filename = 'aryBoxCar' + str(idxPrc) + '.hdf5' # hdf5_path = os.path.join(path, filename) # fileH = tables.openFile(hdf5_path, mode='r') # Output array with pRF model time courses at all modelled standard # deviations for current pixel position: aryOut = np.empty((len(NrlMdlChunk), varNumTP, varNumMtDrctn), dtype='float32') # Prepare status indicator if this is the first of the parallel processes: if idxPrc == 1: # We create a status indicator for the time consuming pRF model finding # algorithm. Number of steps of the status indicator: varStsStpSze = 20 # Number of pRF models to fit: varNumLoops = varNumNrlMdls/varPar # Vector with pRF values at which to give status feedback: vecStatus = np.linspace(0, varNumLoops, num=(varStsStpSze+1), endpoint=True) vecStatus = np.ceil(vecStatus) vecStatus = vecStatus.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatusPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatusPrc = np.ceil(vecStatusPrc) vecStatusPrc = vecStatusPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # Loop through all Gauss parameters that are in this chunk for idx, NrlMdlTrpl in enumerate(NrlMdlChunk): # Status indicator (only used in the first of the parallel # processes): if idxPrc == 1: # Status indicator: if varCntSts02 == vecStatus[varCntSts01]: # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatusPrc[varCntSts01]) + ' % --- ' + str(vecStatus[varCntSts01]) + ' loops out of ' + str(varNumLoops)) print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # x pos of Gauss model: NrlMdlTrpl[0] # y pos of Gauss model: NrlMdlTrpl[1] # std of Gauss model: NrlMdlTrpl[2] # index of tng crv model: NrlMdlTrpl[3] varTmpX = int(np.around(NrlMdlTrpl[0], 0)) varTmpY = int(np.around(NrlMdlTrpl[1], 0)) # Create pRF model (2D): aryGauss = funcGauss2D(varPixX, varPixY, varTmpX, varTmpY, NrlMdlTrpl[2]) # Multiply pixel-wise box car model with Gaussian pRF models: aryNrlTcTmp = np.multiply(aryBoxCar, aryGauss[:, :, None, None]) # Calculate sum across x- and y-dimensions - the 'area under the # Gaussian surface'. This is essentially an unscaled version of the # neural time course model (i.e. not yet scaled for the size of # the pRF). aryNrlTcTmp = np.sum(aryNrlTcTmp, axis=(0, 1)) # Normalise the nrl time course model to the size of the pRF. This # gives us the ratio of 'activation' of the pRF at each time point, # or, in other words, the neural time course model. aryNrlTcTmp = np.divide(aryNrlTcTmp, np.sum(aryGauss, axis=(0, 1))) # Put model time courses into the function's output array: aryOut[idx, :, :] = aryNrlTcTmp # Status indicator (only used in the first of the parallel # processes): if idxPrc == 1: # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # Output list: lstOut = [idxPrc, aryOut, ] queOut.put(lstOut)
[ "def", "funcNrlTcMotPred", "(", "idxPrc", ",", "varPixX", ",", "varPixY", ",", "NrlMdlChunk", ",", "varNumTP", ",", "aryBoxCar", ",", "# aryCond", "path", ",", "varNumNrlMdls", ",", "varNumMtDrctn", ",", "varPar", ",", "queOut", ")", ":", "# # if hd5 method is used: open file for reading", "# filename = 'aryBoxCar' + str(idxPrc) + '.hdf5'", "# hdf5_path = os.path.join(path, filename)", "# fileH = tables.openFile(hdf5_path, mode='r')", "# Output array with pRF model time courses at all modelled standard", "# deviations for current pixel position:", "aryOut", "=", "np", ".", "empty", "(", "(", "len", "(", "NrlMdlChunk", ")", ",", "varNumTP", ",", "varNumMtDrctn", ")", ",", "dtype", "=", "'float32'", ")", "# Prepare status indicator if this is the first of the parallel processes:", "if", "idxPrc", "==", "1", ":", "# We create a status indicator for the time consuming pRF model finding", "# algorithm. Number of steps of the status indicator:", "varStsStpSze", "=", "20", "# Number of pRF models to fit:", "varNumLoops", "=", "varNumNrlMdls", "/", "varPar", "# Vector with pRF values at which to give status feedback:", "vecStatus", "=", "np", ".", "linspace", "(", "0", ",", "varNumLoops", ",", "num", "=", "(", "varStsStpSze", "+", "1", ")", ",", "endpoint", "=", "True", ")", "vecStatus", "=", "np", ".", "ceil", "(", "vecStatus", ")", "vecStatus", "=", "vecStatus", ".", "astype", "(", "int", ")", "# Vector with corresponding percentage values at which to give status", "# feedback:", "vecStatusPrc", "=", "np", ".", "linspace", "(", "0", ",", "100", ",", "num", "=", "(", "varStsStpSze", "+", "1", ")", ",", "endpoint", "=", "True", ")", "vecStatusPrc", "=", "np", ".", "ceil", "(", "vecStatusPrc", ")", "vecStatusPrc", "=", "vecStatusPrc", ".", "astype", "(", "int", ")", "# Counter for status indicator:", "varCntSts01", "=", "0", "varCntSts02", "=", "0", "# Loop through all Gauss parameters that are in this chunk", "for", "idx", ",", "NrlMdlTrpl", "in", "enumerate", "(", "NrlMdlChunk", ")", ":", "# Status indicator (only used in the first of the parallel", "# processes):", "if", "idxPrc", "==", "1", ":", "# Status indicator:", "if", "varCntSts02", "==", "vecStatus", "[", "varCntSts01", "]", ":", "# Prepare status message:", "strStsMsg", "=", "(", "'---------Progress: '", "+", "str", "(", "vecStatusPrc", "[", "varCntSts01", "]", ")", "+", "' % --- '", "+", "str", "(", "vecStatus", "[", "varCntSts01", "]", ")", "+", "' loops out of '", "+", "str", "(", "varNumLoops", ")", ")", "print", "(", "strStsMsg", ")", "# Only increment counter if the last value has not been", "# reached yet:", "if", "varCntSts01", "<", "varStsStpSze", ":", "varCntSts01", "=", "varCntSts01", "+", "int", "(", "1", ")", "# x pos of Gauss model: NrlMdlTrpl[0]", "# y pos of Gauss model: NrlMdlTrpl[1]", "# std of Gauss model: NrlMdlTrpl[2]", "# index of tng crv model: NrlMdlTrpl[3]", "varTmpX", "=", "int", "(", "np", ".", "around", "(", "NrlMdlTrpl", "[", "0", "]", ",", "0", ")", ")", "varTmpY", "=", "int", "(", "np", ".", "around", "(", "NrlMdlTrpl", "[", "1", "]", ",", "0", ")", ")", "# Create pRF model (2D):", "aryGauss", "=", "funcGauss2D", "(", "varPixX", ",", "varPixY", ",", "varTmpX", ",", "varTmpY", ",", "NrlMdlTrpl", "[", "2", "]", ")", "# Multiply pixel-wise box car model with Gaussian pRF models:", "aryNrlTcTmp", "=", "np", ".", "multiply", "(", "aryBoxCar", ",", "aryGauss", "[", ":", ",", ":", ",", "None", ",", "None", "]", ")", "# Calculate sum across x- and y-dimensions - the 'area under the", "# Gaussian surface'. 
This is essentially an unscaled version of the", "# neural time course model (i.e. not yet scaled for the size of", "# the pRF).", "aryNrlTcTmp", "=", "np", ".", "sum", "(", "aryNrlTcTmp", ",", "axis", "=", "(", "0", ",", "1", ")", ")", "# Normalise the nrl time course model to the size of the pRF. This", "# gives us the ratio of 'activation' of the pRF at each time point,", "# or, in other words, the neural time course model.", "aryNrlTcTmp", "=", "np", ".", "divide", "(", "aryNrlTcTmp", ",", "np", ".", "sum", "(", "aryGauss", ",", "axis", "=", "(", "0", ",", "1", ")", ")", ")", "# Put model time courses into the function's output array:", "aryOut", "[", "idx", ",", ":", ",", ":", "]", "=", "aryNrlTcTmp", "# Status indicator (only used in the first of the parallel", "# processes):", "if", "idxPrc", "==", "1", ":", "# Increment status indicator counter:", "varCntSts02", "=", "varCntSts02", "+", "1", "# Output list:", "lstOut", "=", "[", "idxPrc", ",", "aryOut", ",", "]", "queOut", ".", "put", "(", "lstOut", ")" ]
Function for creating neural time course models. This function should be used to create neural models if different predictors for every motion direction are included.
[ "Function", "for", "creating", "neural", "time", "course", "models", ".", "This", "function", "should", "be", "used", "to", "create", "neural", "models", "if", "different", "predictors", "for", "every", "motion", "direction", "are", "included", "." ]
python
train
36.307087
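The core of the model above — weight a pixel-wise design with a 2D Gaussian, sum over space, and normalise by the Gaussian's area — in a small numpy sketch with toy dimensions:

import numpy as np

pix, ntp = 16, 10
yy, xx = np.mgrid[0:pix, 0:pix]
gauss = np.exp(-((xx - 8.0)**2 + (yy - 8.0)**2) / (2 * 2.0**2))   # pRF at (8, 8), sd 2

boxcar = np.random.rand(pix, pix, ntp)                   # pixel-wise time courses
nrl_tc = (boxcar * gauss[:, :, None]).sum(axis=(0, 1))   # area under the Gaussian
nrl_tc /= gauss.sum()                                    # scale to the pRF size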
gregoil/ipdbugger
ipdbugger/__init__.py
https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L134-L175
def wrap_with_try(self, node): """Wrap an ast node in a 'try' node to enter debug on exception.""" handlers = [] if self.ignore_exceptions is None: handlers.append(ast.ExceptHandler(type=None, name=None, body=[ast.Raise()])) else: ignores_nodes = self.ignore_exceptions handlers.append(ast.ExceptHandler(type=ast.Tuple(ignores_nodes, ast.Load()), name=None, body=[ast.Raise()])) if self.catch_exception is None or \ get_node_value(self.catch_exception) not in \ (get_node_value(ast_node) for ast_node in self.ignore_exceptions): call_extra_parameters = [] if IS_PYTHON_3 else [None, None] start_debug_cmd = ast.Expr( value=ast.Call(ast.Name("start_debugging", ast.Load()), [], [], *call_extra_parameters)) catch_exception_type = None if self.catch_exception is not None: catch_exception_type = self.catch_exception handlers.append(ast.ExceptHandler(type=catch_exception_type, name=None, body=[start_debug_cmd])) try_except_extra_params = {"finalbody": []} if IS_PYTHON_3 else {} new_node = self.ast_try_except(orelse=[], body=[node], handlers=handlers, **try_except_extra_params) return ast.copy_location(new_node, node)
[ "def", "wrap_with_try", "(", "self", ",", "node", ")", ":", "handlers", "=", "[", "]", "if", "self", ".", "ignore_exceptions", "is", "None", ":", "handlers", ".", "append", "(", "ast", ".", "ExceptHandler", "(", "type", "=", "None", ",", "name", "=", "None", ",", "body", "=", "[", "ast", ".", "Raise", "(", ")", "]", ")", ")", "else", ":", "ignores_nodes", "=", "self", ".", "ignore_exceptions", "handlers", ".", "append", "(", "ast", ".", "ExceptHandler", "(", "type", "=", "ast", ".", "Tuple", "(", "ignores_nodes", ",", "ast", ".", "Load", "(", ")", ")", ",", "name", "=", "None", ",", "body", "=", "[", "ast", ".", "Raise", "(", ")", "]", ")", ")", "if", "self", ".", "catch_exception", "is", "None", "or", "get_node_value", "(", "self", ".", "catch_exception", ")", "not", "in", "(", "get_node_value", "(", "ast_node", ")", "for", "ast_node", "in", "self", ".", "ignore_exceptions", ")", ":", "call_extra_parameters", "=", "[", "]", "if", "IS_PYTHON_3", "else", "[", "None", ",", "None", "]", "start_debug_cmd", "=", "ast", ".", "Expr", "(", "value", "=", "ast", ".", "Call", "(", "ast", ".", "Name", "(", "\"start_debugging\"", ",", "ast", ".", "Load", "(", ")", ")", ",", "[", "]", ",", "[", "]", ",", "*", "call_extra_parameters", ")", ")", "catch_exception_type", "=", "None", "if", "self", ".", "catch_exception", "is", "not", "None", ":", "catch_exception_type", "=", "self", ".", "catch_exception", "handlers", ".", "append", "(", "ast", ".", "ExceptHandler", "(", "type", "=", "catch_exception_type", ",", "name", "=", "None", ",", "body", "=", "[", "start_debug_cmd", "]", ")", ")", "try_except_extra_params", "=", "{", "\"finalbody\"", ":", "[", "]", "}", "if", "IS_PYTHON_3", "else", "{", "}", "new_node", "=", "self", ".", "ast_try_except", "(", "orelse", "=", "[", "]", ",", "body", "=", "[", "node", "]", ",", "handlers", "=", "handlers", ",", "*", "*", "try_except_extra_params", ")", "return", "ast", ".", "copy_location", "(", "new_node", ",", "node", ")" ]
Wrap an ast node in a 'try' node to enter debug on exception.
[ "Wrap", "an", "ast", "node", "in", "a", "try", "node", "to", "enter", "debug", "on", "exception", "." ]
python
train
43.619048
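A minimal standalone version of the wrapping step on Python 3: parse one statement, enclose it in a re-raising try/except, and compile the result:

import ast

mod = ast.parse("x = 1 / 0")
stmt = mod.body[0]

handler = ast.ExceptHandler(type=None, name=None,
                            body=[ast.Raise(exc=None, cause=None)])   # bare raise
mod.body[0] = ast.Try(body=[stmt], handlers=[handler],
                      orelse=[], finalbody=[])

ast.fix_missing_locations(mod)
code = compile(mod, "<wrapped>", "exec")   # running it still re-raises the error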
mdsol/rwslib
rwslib/builders/metadata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L1408-L1412
def build(self, builder): """Build XML by appending to builder""" builder.start("CheckValue", {}) builder.data(str(self.value)) builder.end("CheckValue")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "builder", ".", "start", "(", "\"CheckValue\"", ",", "{", "}", ")", "builder", ".", "data", "(", "str", "(", "self", ".", "value", ")", ")", "builder", ".", "end", "(", "\"CheckValue\"", ")" ]
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
python
train
36.2
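The builder calls match the stdlib xml.etree TreeBuilder protocol; the same three calls in isolation:

from xml.etree.ElementTree import TreeBuilder, tostring

b = TreeBuilder()
b.start("CheckValue", {})
b.data("42")
b.end("CheckValue")

tostring(b.close())   # b'<CheckValue>42</CheckValue>'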
bwohlberg/sporco
sporco/cupy/__init__.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cupy/__init__.py#L254-L257
def _inner(x, y, axis=-1): """Patched version of :func:`sporco.linalg.inner`.""" return cp.sum(x * y, axis=axis, keepdims=True)
[ "def", "_inner", "(", "x", ",", "y", ",", "axis", "=", "-", "1", ")", ":", "return", "cp", ".", "sum", "(", "x", "*", "y", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")" ]
Patched version of :func:`sporco.linalg.inner`.
[ "Patched", "version", "of", ":", "func", ":", "sporco", ".", "linalg", ".", "inner", "." ]
python
train
33.25
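The plain-numpy counterpart of this cupy patch, for reference:

import numpy as np

x = np.random.randn(4, 8)
y = np.random.randn(4, 8)

ip = np.sum(x * y, axis=-1, keepdims=True)   # shape (4, 1): per-row inner products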
learningequality/ricecooker
examples/sample_program.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/examples/sample_program.py#L306-L314
def construct_channel(self, *args, **kwargs): """ Create ChannelNode and build topic tree. """ channel = self.get_channel(*args, **kwargs) # creates ChannelNode from data in self.channel_info _build_tree(channel, SAMPLE_TREE) raise_for_invalid_channel(channel) return channel
[ "def", "construct_channel", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "channel", "=", "self", ".", "get_channel", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# creates ChannelNode from data in self.channel_info", "_build_tree", "(", "channel", ",", "SAMPLE_TREE", ")", "raise_for_invalid_channel", "(", "channel", ")", "return", "channel" ]
Create ChannelNode and build topic tree.
[ "Create", "ChannelNode", "and", "build", "topic", "tree", "." ]
python
train
36.222222
openwisp/netdiff
netdiff/parsers/batman.py
https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L108-L118
def _parse_txtinfo(self, data): """ Converts the python list returned by self._txtinfo_to_python() to a NetworkX Graph object, which is then returned. """ graph = self._init_graph() for link in data: graph.add_edge(link['source'], link['target'], weight=link['cost']) return graph
[ "def", "_parse_txtinfo", "(", "self", ",", "data", ")", ":", "graph", "=", "self", ".", "_init_graph", "(", ")", "for", "link", "in", "data", ":", "graph", ".", "add_edge", "(", "link", "[", "'source'", "]", ",", "link", "[", "'target'", "]", ",", "weight", "=", "link", "[", "'cost'", "]", ")", "return", "graph" ]
Converts the python list returned by self._txtinfo_to_python() to a NetworkX Graph object, which is then returned.
[ "Converts", "the", "python", "list", "returned", "by", "self", ".", "_txtinfo_to_python", "()", "to", "a", "NetworkX", "Graph", "object", "which", "is", "then", "returned", "." ]
python
train
35.636364
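The same conversion with plain networkx on a made-up link list:

import networkx as nx

data = [
    {'source': '10.0.0.1', 'target': '10.0.0.2', 'cost': 1.0},
    {'source': '10.0.0.2', 'target': '10.0.0.3', 'cost': 2.5},
]

graph = nx.Graph()
for link in data:
    graph.add_edge(link['source'], link['target'], weight=link['cost'])

graph['10.0.0.1']['10.0.0.2']['weight']   # 1.0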
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10593-L10617
def set_position_target_local_ned_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False): ''' Sets a desired vehicle position in a local north-east-down coordinate frame. Used by an external controller to command the vehicle (manual controller or other system). time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) x : X Position in NED frame in meters (float) y : Y Position in NED frame in meters (float) z : Z Position in NED frame in meters (note, altitude is negative in NED) (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return self.send(self.set_position_target_local_ned_encode(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1)
[ "def", "set_position_target_local_ned_send", "(", "self", ",", "time_boot_ms", ",", "target_system", ",", "target_component", ",", "coordinate_frame", ",", "type_mask", ",", "x", ",", "y", ",", "z", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "set_position_target_local_ned_encode", "(", "time_boot_ms", ",", "target_system", ",", "target_component", ",", "coordinate_frame", ",", "type_mask", ",", "x", ",", "y", ",", "z", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Sets a desired vehicle position in a local north-east-down coordinate frame. Used by an external controller to command the vehicle (manual controller or other system). time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) x : X Position in NED frame in meters (float) y : Y Position in NED frame in meters (float) z : Z Position in NED frame in meters (note, altitude is negative in NED) (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
[ "Sets", "a", "desired", "vehicle", "position", "in", "a", "local", "north", "-", "east", "-", "down", "coordinate", "frame", ".", "Used", "by", "an", "external", "controller", "to", "command", "the", "vehicle", "(", "manual", "controller", "or", "other", "system", ")", "." ]
python
train
108.52
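The type_mask bitfield described in the docstring is easy to get wrong. As a worked example, a position-only setpoint sets the ignore bits for everything except x, y, z:

# Bits are 1-indexed in the docstring; a set bit means "ignore this field".
ignore = [4, 5, 6, 7, 8, 9, 10, 11, 12]   # vx..vz, ax..az, force flag, yaw, yaw rate
type_mask = sum(1 << (b - 1) for b in ignore)

assert type_mask == 0b0000111111111000 == 4088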
non-Jedi/gyr
gyr/matrix_objects.py
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L49-L55
def user(self): """Creates a User object when requested.""" try: return self._user except AttributeError: self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid)) return self._user
[ "def", "user", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_user", "except", "AttributeError", ":", "self", ".", "_user", "=", "MatrixUser", "(", "self", ".", "mxid", ",", "self", ".", "Api", "(", "identity", "=", "self", ".", "mxid", ")", ")", "return", "self", ".", "_user" ]
Creates a User object when requested.
[ "Creates", "a", "User", "object", "when", "requested", "." ]
python
train
34.571429
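The try/except-on-AttributeError pattern here is a hand-rolled lazy cache; stripped to its essence (on Python 3.8+, functools.cached_property gives the same behaviour):

class Lazy:
    def _expensive(self):
        print("computed once")
        return 42

    @property
    def value(self):
        try:
            return self._value          # hit the cache on later accesses
        except AttributeError:
            self._value = self._expensive()
            return self._value

obj = Lazy()
obj.value   # prints "computed once", returns 42
obj.value   # returns 42 silently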
zeromake/aiko
aiko/request.py
https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/request.py#L321-L326
def set(self, name: str, value: str) -> None: """ Override a header on the request; not recommended. """ name = name.casefold() self._headers[name] = value
[ "def", "set", "(", "self", ",", "name", ":", "str", ",", "value", ":", "str", ")", "->", "None", ":", "name", "=", "name", ".", "casefold", "(", ")", "self", ".", "_headers", "[", "name", "]", "=", "value" ]
Override a header on the request; not recommended.
[ "Override", "a", "header", "on", "the", "request", "not", "recommended" ]
python
train
26.666667
numenta/nupic
src/nupic/regions/tm_region.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/tm_region.py#L65-L114
def _buildArgs(f, self=None, kwargs={}): """ Get the default arguments from the function and assign as instance vars. Return a list of 3-tuples with (name, description, defaultValue) for each argument to the function. Assigns all arguments to the function as instance variables of TMRegion. If the argument was not provided, uses the default value. Pops any values from kwargs that go to the function. """ # Get the name, description, and default value for each argument argTuples = getArgumentDescriptions(f) argTuples = argTuples[1:] # Remove 'self' # Get the names of the parameters to our own constructor and remove them # Check for _original_init first, because if LockAttributesMixin is used, # __init__'s signature will be just (self, *args, **kw), but # _original_init is created with the original signature #init = getattr(self, '_original_init', self.__init__) init = TMRegion.__init__ ourArgNames = [t[0] for t in getArgumentDescriptions(init)] # Also remove a few other names that aren't in our constructor but are # computed automatically (e.g. numberOfCols for the TM) ourArgNames += [ 'numberOfCols', # TM ] for argTuple in argTuples[:]: if argTuple[0] in ourArgNames: argTuples.remove(argTuple) # Build the dictionary of arguments if self: for argTuple in argTuples: argName = argTuple[0] if argName in kwargs: # Argument was provided argValue = kwargs.pop(argName) else: # Argument was not provided; use the default value if there is one, and # raise an exception otherwise if len(argTuple) == 2: # No default value raise TypeError("Must provide '%s'" % argName) argValue = argTuple[2] # Set as an instance variable if 'self' was passed in setattr(self, argName, argValue) return argTuples
[ "def", "_buildArgs", "(", "f", ",", "self", "=", "None", ",", "kwargs", "=", "{", "}", ")", ":", "# Get the name, description, and default value for each argument", "argTuples", "=", "getArgumentDescriptions", "(", "f", ")", "argTuples", "=", "argTuples", "[", "1", ":", "]", "# Remove 'self'", "# Get the names of the parameters to our own constructor and remove them", "# Check for _original_init first, because if LockAttributesMixin is used,", "# __init__'s signature will be just (self, *args, **kw), but", "# _original_init is created with the original signature", "#init = getattr(self, '_original_init', self.__init__)", "init", "=", "TMRegion", ".", "__init__", "ourArgNames", "=", "[", "t", "[", "0", "]", "for", "t", "in", "getArgumentDescriptions", "(", "init", ")", "]", "# Also remove a few other names that aren't in our constructor but are", "# computed automatically (e.g. numberOfCols for the TM)", "ourArgNames", "+=", "[", "'numberOfCols'", ",", "# TM", "]", "for", "argTuple", "in", "argTuples", "[", ":", "]", ":", "if", "argTuple", "[", "0", "]", "in", "ourArgNames", ":", "argTuples", ".", "remove", "(", "argTuple", ")", "# Build the dictionary of arguments", "if", "self", ":", "for", "argTuple", "in", "argTuples", ":", "argName", "=", "argTuple", "[", "0", "]", "if", "argName", "in", "kwargs", ":", "# Argument was provided", "argValue", "=", "kwargs", ".", "pop", "(", "argName", ")", "else", ":", "# Argument was not provided; use the default value if there is one, and", "# raise an exception otherwise", "if", "len", "(", "argTuple", ")", "==", "2", ":", "# No default value", "raise", "TypeError", "(", "\"Must provide '%s'\"", "%", "argName", ")", "argValue", "=", "argTuple", "[", "2", "]", "# Set as an instance variable if 'self' was passed in", "setattr", "(", "self", ",", "argName", ",", "argValue", ")", "return", "argTuples" ]
Get the default arguments from the function and assign as instance vars. Return a list of 3-tuples with (name, description, defaultValue) for each argument to the function. Assigns all arguments to the function as instance variables of TMRegion. If the argument was not provided, uses the default value. Pops any values from kwargs that go to the function.
[ "Get", "the", "default", "arguments", "from", "the", "function", "and", "assign", "as", "instance", "vars", "." ]
python
valid
36.84
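The introspection that getArgumentDescriptions performs can be approximated with the stdlib; a sketch pairing trailing parameters with their defaults (assumes the function has at least one default):

import inspect

def f(self, alpha, beta=2, gamma=3):
    pass

spec = inspect.getfullargspec(f)
defaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
# {'beta': 2, 'gamma': 3}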
TorkamaniLab/metapipe
metapipe/models/pbs_job.py
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/pbs_job.py#L60-L80
def _grep_qstat(self, status_type='complete'): """ Greps qstat -e <job_id> for information from the queue. :param status_type: complete, queued, running, error, gone """ args = "qstat -e {}".format(self.id).split() res, _ = call(args) if res == '': return False res = res.split('\n')[2].split()[4] if status_type == 'complete' and res == 'C': return True elif status_type == 'error' and (res == 'E' or res == 'C'): return True elif status_type == 'running' and res == 'R': return True elif status_type == 'queued' and res == 'Q': return True elif status_type == 'gone' and 'unknown job id' in str(res).lower(): return True else: return False
[ "def", "_grep_qstat", "(", "self", ",", "status_type", "=", "'complete'", ")", ":", "args", "=", "\"qstat -e {}\"", ".", "format", "(", "self", ".", "id", ")", ".", "split", "(", ")", "res", ",", "_", "=", "call", "(", "args", ")", "if", "res", "==", "''", ":", "return", "False", "res", "=", "res", ".", "split", "(", "'\\n'", ")", "[", "2", "]", ".", "split", "(", ")", "[", "4", "]", "if", "status_type", "==", "'complete'", "and", "res", "==", "'C'", ":", "return", "True", "elif", "status_type", "==", "'error'", "and", "(", "res", "==", "'E'", "or", "res", "==", "'C'", ")", ":", "return", "True", "elif", "status_type", "==", "'running'", "and", "res", "==", "'R'", ":", "return", "True", "elif", "status_type", "==", "'queued'", "and", "res", "==", "'Q'", ":", "return", "True", "elif", "status_type", "==", "'gone'", "and", "'unknown job id'", "in", "str", "(", "res", ")", ".", "lower", "(", ")", ":", "return", "True", "else", ":", "return", "False" ]
Greps qstat -e <job_id> for information from the queue. :param status_type: complete, queued, running, error, gone
[ "Greps", "qstat", "-", "e", "<job_id", ">", "for", "information", "from", "the", "queue", ".", ":", "param", "status_type", ":", "complete", "queued", "running", "error", "gone" ]
python
train
38
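The parsing step picks the state column from the third line of qstat output; on a hypothetical sample block shaped like typical qstat tables:

sample = (
    "Job ID    Name    User   Time Use S Queue\n"
    "--------- ------- ------ -------- - -----\n"
    "123.host  myjob   alice  00:01:02 R batch\n"
)

state = sample.split('\n')[2].split()[4]   # 'R' -> the job is running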
dereneaton/ipyrad
ipyrad/plotting/baba_panel_plot.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/baba_panel_plot.py#L26-L66
def baba_panel_plot( ttree, tests, boots, show_tip_labels=True, show_test_labels=True, use_edge_lengths=False, collapse_outgroup=False, pct_tree_x=0.4, pct_tree_y=0.2, alpha=3.0, *args, **kwargs): """ signature... """ ## create Panel plot object and set height & width bootsarr = np.array(boots) panel = Panel(ttree, tests, bootsarr, alpha) if not kwargs.get("width"): panel.kwargs["width"] = min(1000, 50*len(panel.tree)) if not kwargs.get("height"): panel.kwargs["height"] = min(1000, 50*len(panel.tests)) ## update defaults with kwargs & update size based on ntips & ntests kwargs.update(dict(pct_tree_x=pct_tree_x, pct_tree_y=pct_tree_y)) panel.kwargs.update(kwargs) ## create a canvas and a single cartesian coord system canvas = toyplot.Canvas(height=panel.kwargs['height'], width=panel.kwargs['width']) axes = canvas.cartesian(bounds=("10%", "90%", "5%", "95%")) axes.show = False ## add panels to axes panel.panel_tree(axes) panel.panel_test(axes) panel.panel_tip_labels(axes) if isinstance(boots, np.ndarray): panel.panel_results(axes) return canvas, axes, panel
[ "def", "baba_panel_plot", "(", "ttree", ",", "tests", ",", "boots", ",", "show_tip_labels", "=", "True", ",", "show_test_labels", "=", "True", ",", "use_edge_lengths", "=", "False", ",", "collapse_outgroup", "=", "False", ",", "pct_tree_x", "=", "0.4", ",", "pct_tree_y", "=", "0.2", ",", "alpha", "=", "3.0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "## create Panel plot object and set height & width", "bootsarr", "=", "np", ".", "array", "(", "boots", ")", "panel", "=", "Panel", "(", "ttree", ",", "tests", ",", "bootsarr", ",", "alpha", ")", "if", "not", "kwargs", ".", "get", "(", "\"width\"", ")", ":", "panel", ".", "kwargs", "[", "\"width\"", "]", "=", "min", "(", "1000", ",", "50", "*", "len", "(", "panel", ".", "tree", ")", ")", "if", "not", "kwargs", ".", "get", "(", "\"height\"", ")", ":", "panel", ".", "kwargs", "[", "\"height\"", "]", "=", "min", "(", "1000", ",", "50", "*", "len", "(", "panel", ".", "tests", ")", ")", "## update defaults with kwargs & update size based on ntips & ntests", "kwargs", ".", "update", "(", "dict", "(", "pct_tree_x", "=", "pct_tree_x", ",", "pct_tree_y", "=", "pct_tree_y", ")", ")", "panel", ".", "kwargs", ".", "update", "(", "kwargs", ")", "## create a canvas and a single cartesian coord system", "canvas", "=", "toyplot", ".", "Canvas", "(", "height", "=", "panel", ".", "kwargs", "[", "'height'", "]", ",", "width", "=", "panel", ".", "kwargs", "[", "'width'", "]", ")", "axes", "=", "canvas", ".", "cartesian", "(", "bounds", "=", "(", "\"10%\"", ",", "\"90%\"", ",", "\"5%\"", ",", "\"95%\"", ")", ")", "axes", ".", "show", "=", "False", "## add panels to axes", "panel", ".", "panel_tree", "(", "axes", ")", "panel", ".", "panel_test", "(", "axes", ")", "panel", ".", "panel_tip_labels", "(", "axes", ")", "if", "isinstance", "(", "boots", ",", "np", ".", "ndarray", ")", ":", "panel", ".", "panel_results", "(", "axes", ")", "return", "canvas", ",", "axes", ",", "panel" ]
signature...
[ "signature", "..." ]
python
valid
29.390244
RockFeng0/rtsf-web
webuidriver/actions.py
https://github.com/RockFeng0/rtsf-web/blob/ceabcf62ddf1c969a97b5c7a4a4c547198b6ea71/webuidriver/actions.py#L199-L218
def _element(cls): ''' find the element with controls ''' if not cls.__is_selector(): raise Exception("Invalid selector[%s]." %cls.__control["by"]) driver = Web.driver try: elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"])) except: raise Exception("Timeout at %d seconds. Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"])) if len(elements) < cls.__control["index"] + 1: raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"])) if len(elements) > 1: print("Element [%s]: There are [%d] elements, chose index=%d" %(cls.__name__,len(elements),cls.__control["index"])) elm = elements[cls.__control["index"]] cls.__control["index"] = 0 return elm
[ "def", "_element", "(", "cls", ")", ":", "if", "not", "cls", ".", "__is_selector", "(", ")", ":", "raise", "Exception", "(", "\"Invalid selector[%s].\"", "%", "cls", ".", "__control", "[", "\"by\"", "]", ")", "driver", "=", "Web", ".", "driver", "try", ":", "elements", "=", "WebDriverWait", "(", "driver", ",", "cls", ".", "__control", "[", "\"timeout\"", "]", ")", ".", "until", "(", "lambda", "driver", ":", "getattr", "(", "driver", ",", "\"find_elements\"", ")", "(", "cls", ".", "__control", "[", "\"by\"", "]", ",", "cls", ".", "__control", "[", "\"value\"", "]", ")", ")", "except", ":", "raise", "Exception", "(", "\"Timeout at %d seconds. Element(%s) not found.\"", "%", "(", "cls", ".", "__control", "[", "\"timeout\"", "]", ",", "cls", ".", "__control", "[", "\"by\"", "]", ")", ")", "if", "len", "(", "elements", ")", "<", "cls", ".", "__control", "[", "\"index\"", "]", "+", "1", ":", "raise", "Exception", "(", "\"Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]\"", "%", "(", "cls", ".", "__name__", ",", "len", "(", "elements", ")", ",", "cls", ".", "__control", "[", "\"index\"", "]", ")", ")", "if", "len", "(", "elements", ")", ">", "1", ":", "print", "(", "\"Element [%s]: There are [%d] elements, chose index=%d\"", "%", "(", "cls", ".", "__name__", ",", "len", "(", "elements", ")", ",", "cls", ".", "__control", "[", "\"index\"", "]", ")", ")", "elm", "=", "elements", "[", "cls", ".", "__control", "[", "\"index\"", "]", "]", "cls", ".", "__control", "[", "\"index\"", "]", "=", "0", "return", "elm" ]
find the element with controls
[ "find", "the", "element", "with", "controls" ]
python
train
55.8
TheOneHyer/arandomness
build/lib.linux-x86_64-3.6/arandomness/files/copen.py
https://github.com/TheOneHyer/arandomness/blob/ae9f630e9a1d67b0eb6d61644a49756de8a5268c/build/lib.linux-x86_64-3.6/arandomness/files/copen.py#L41-L147
def copen(fileobj, mode='rb', **kwargs): """Detects and opens compressed file for reading and writing. Args: fileobj (File): any File-like object supported by an underlying compression algorithm mode (unicode): mode to open fileobj with **kwargs: keyword-arguments to pass to the compression algorithm Returns: File: TextWrapper if no compression, else returns appropriate wrapper for the compression type Example: .. code-block:: Python >>> from tempfile import NamedTemporaryFile >>> # Write compressed file >>> temp = NamedTemporaryFile(delete=False, suffix='.bz2') >>> test_bz2 = copen(temp.name, 'wb') >>> test_bz2.write(b'bzip2') >>> test_bz2.close() >>> # Read compressed bzip file >>> test_bz2 = copen(temp.name, 'rb') >>> test_bz2.read() b'bzip2' """ algo = io.open # Only used as io.open in write mode mode = mode.lower().strip() modules = {} # Later populated by compression algorithms write_mode = False if mode.lstrip('U')[0] == 'r' else True kwargs['mode'] = mode # Currently supported compression algorithms modules_to_import = { 'bz2': 'BZ2File', 'gzip': 'GzipFile', 'lzma': 'LZMAFile' } # Dynamically import compression libraries and warn about failures for mod, _class in modules_to_import.items(): try: modules[_class] = getattr(import_module(mod), _class) except (ImportError, AttributeError) as e: modules[_class] = open warn('Cannot process {0} files due to following error:' '{1}{2}{1}You will need to install the {0} library to ' 'properly use these files. Currently, such files will ' 'open in "text" mode.'.format(mod, linesep, e)) # Write mode if write_mode is True: # Map file extensions to decompression classes algo_map = { 'bz2': modules['BZ2File'], 'gz': modules['GzipFile'], 'xz': modules['LZMAFile'] } # Determine the compression algorithm via the file extension ext = fileobj.split('.')[-1] try: algo = algo_map[ext] except KeyError: pass # Read mode else: algo = io.TextIOWrapper # Default to plaintext buffer # Magic headers of encryption formats file_sigs = { b'\x42\x5a\x68': modules['BZ2File'], b'\x1f\x8b\x08': modules['GzipFile'], b'\xfd7zXZ\x00': modules['LZMAFile'] } # Open the file, buffer it, and identify the compression algorithm fileobj = io.BufferedReader(io.open(fileobj, 'rb')) max_len = max(len(x) for x in file_sigs.keys()) start = fileobj.peek(max_len) for sig in file_sigs.keys(): if start.startswith(sig): algo = file_sigs[sig] break # Stop iterating once a good signature is found # Filter all **kwargs by the args accepted by the compression algorithm algo_args = set(getfullargspec(algo).args) good_args = set(kwargs.keys()).intersection(algo_args) _kwargs = {arg: kwargs[arg] for arg in good_args} # Open the file using parameters defined above and store in namespace if write_mode is True: handle = algo(fileobj, **_kwargs) else: try: # For algorithms that need to be explicitly given a fileobj handle = algo(fileobj=fileobj, **_kwargs) except TypeError: # For algorithms that detect file objects handle = algo(fileobj, **_kwargs) return handle
[ "def", "copen", "(", "fileobj", ",", "mode", "=", "'rb'", ",", "*", "*", "kwargs", ")", ":", "algo", "=", "io", ".", "open", "# Only used as io.open in write mode", "mode", "=", "mode", ".", "lower", "(", ")", ".", "strip", "(", ")", "modules", "=", "{", "}", "# Later populated by compression algorithms", "write_mode", "=", "False", "if", "mode", ".", "lstrip", "(", "'U'", ")", "[", "0", "]", "==", "'r'", "else", "True", "kwargs", "[", "'mode'", "]", "=", "mode", "# Currently supported compression algorithms", "modules_to_import", "=", "{", "'bz2'", ":", "'BZ2File'", ",", "'gzip'", ":", "'GzipFile'", ",", "'lzma'", ":", "'LZMAFile'", "}", "# Dynamically import compression libraries and warn about failures", "for", "mod", ",", "_class", "in", "modules_to_import", ".", "items", "(", ")", ":", "try", ":", "modules", "[", "_class", "]", "=", "getattr", "(", "import_module", "(", "mod", ")", ",", "_class", ")", "except", "(", "ImportError", ",", "AttributeError", ")", "as", "e", ":", "modules", "[", "_class", "]", "=", "open", "warn", "(", "'Cannot process {0} files due to following error:'", "'{1}{2}{1}You will need to install the {0} library to '", "'properly use these files. Currently, such files will '", "'open in \"text\" mode.'", ".", "format", "(", "mod", ",", "linesep", ",", "e", ")", ")", "# Write mode", "if", "write_mode", "is", "True", ":", "# Map file extensions to decompression classes", "algo_map", "=", "{", "'bz2'", ":", "modules", "[", "'BZ2File'", "]", ",", "'gz'", ":", "modules", "[", "'GzipFile'", "]", ",", "'xz'", ":", "modules", "[", "'LZMAFile'", "]", "}", "# Determine the compression algorithm via the file extension", "ext", "=", "fileobj", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "try", ":", "algo", "=", "algo_map", "[", "ext", "]", "except", "KeyError", ":", "pass", "# Read mode", "else", ":", "algo", "=", "io", ".", "TextIOWrapper", "# Default to plaintext buffer", "# Magic headers of encryption formats", "file_sigs", "=", "{", "b'\\x42\\x5a\\x68'", ":", "modules", "[", "'BZ2File'", "]", ",", "b'\\x1f\\x8b\\x08'", ":", "modules", "[", "'GzipFile'", "]", ",", "b'\\xfd7zXZ\\x00'", ":", "modules", "[", "'LZMAFile'", "]", "}", "# Open the file, buffer it, and identify the compression algorithm", "fileobj", "=", "io", ".", "BufferedReader", "(", "io", ".", "open", "(", "fileobj", ",", "'rb'", ")", ")", "max_len", "=", "max", "(", "len", "(", "x", ")", "for", "x", "in", "file_sigs", ".", "keys", "(", ")", ")", "start", "=", "fileobj", ".", "peek", "(", "max_len", ")", "for", "sig", "in", "file_sigs", ".", "keys", "(", ")", ":", "if", "start", ".", "startswith", "(", "sig", ")", ":", "algo", "=", "file_sigs", "[", "sig", "]", "break", "# Stop iterating once a good signature is found", "# Filter all **kwargs by the args accepted by the compression algorithm", "algo_args", "=", "set", "(", "getfullargspec", "(", "algo", ")", ".", "args", ")", "good_args", "=", "set", "(", "kwargs", ".", "keys", "(", ")", ")", ".", "intersection", "(", "algo_args", ")", "_kwargs", "=", "{", "arg", ":", "kwargs", "[", "arg", "]", "for", "arg", "in", "good_args", "}", "# Open the file using parameters defined above and store in namespace", "if", "write_mode", "is", "True", ":", "handle", "=", "algo", "(", "fileobj", ",", "*", "*", "_kwargs", ")", "else", ":", "try", ":", "# For algorithms that need to be explicitly given a fileobj", "handle", "=", "algo", "(", "fileobj", "=", "fileobj", ",", "*", "*", "_kwargs", ")", "except", "TypeError", ":", "# For algorithms that detect file 
objects", "handle", "=", "algo", "(", "fileobj", ",", "*", "*", "_kwargs", ")", "return", "handle" ]
Detects and opens compressed file for reading and writing. Args: fileobj (File): any File-like object supported by an underlying compression algorithm mode (unicode): mode to open fileobj with **kwargs: keyword-arguments to pass to the compression algorithm Returns: File: TextWrapper if no compression, else returns appropriate wrapper for the compression type Example: .. code-block:: Python >>> from tempfile import NamedTemporaryFile >>> # Write compressed file >>> temp = NamedTemporaryFile(delete=False, suffix='.bz2') >>> test_bz2 = copen(temp.name, 'wb') >>> test_bz2.write(b'bzip2') >>> test_bz2.close() >>> # Read compressed bzip file >>> test_bz2 = copen(temp.name, 'rb') >>> test_bz2.read() b'bzip2'
[ "Detects", "and", "opens", "compressed", "file", "for", "reading", "and", "writing", "." ]
python
train
34.364486
edaniszewski/colorutils
colorutils/convert.py
https://github.com/edaniszewski/colorutils/blob/bdff54091cb5d62aa8628ce39bc09abd40fb8dd0/colorutils/convert.py#L69-L104
def rgb_to_hsv(rgb): """ Convert an RGB color representation to an HSV color representation. (r, g, b) :: r -> [0, 255] g -> [0, 255] b -> [0, 255] :param rgb: A tuple of three numeric values corresponding to the red, green, and blue value. :return: HSV representation of the input RGB value. :rtype: tuple """ r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255 _min = min(r, g, b) _max = max(r, g, b) v = _max delta = _max - _min if _max == 0: return 0, 0, v s = delta / _max if delta == 0: delta = 1 if r == _max: h = 60 * (((g - b) / delta) % 6) elif g == _max: h = 60 * (((b - r) / delta) + 2) else: h = 60 * (((r - g) / delta) + 4) return round(h, 3), round(s, 3), round(v, 3)
[ "def", "rgb_to_hsv", "(", "rgb", ")", ":", "r", ",", "g", ",", "b", "=", "rgb", "[", "0", "]", "/", "255", ",", "rgb", "[", "1", "]", "/", "255", ",", "rgb", "[", "2", "]", "/", "255", "_min", "=", "min", "(", "r", ",", "g", ",", "b", ")", "_max", "=", "max", "(", "r", ",", "g", ",", "b", ")", "v", "=", "_max", "delta", "=", "_max", "-", "_min", "if", "_max", "==", "0", ":", "return", "0", ",", "0", ",", "v", "s", "=", "delta", "/", "_max", "if", "delta", "==", "0", ":", "delta", "=", "1", "if", "r", "==", "_max", ":", "h", "=", "60", "*", "(", "(", "(", "g", "-", "b", ")", "/", "delta", ")", "%", "6", ")", "elif", "g", "==", "_max", ":", "h", "=", "60", "*", "(", "(", "(", "b", "-", "r", ")", "/", "delta", ")", "+", "2", ")", "else", ":", "h", "=", "60", "*", "(", "(", "(", "r", "-", "g", ")", "/", "delta", ")", "+", "4", ")", "return", "round", "(", "h", ",", "3", ")", ",", "round", "(", "s", ",", "3", ")", ",", "round", "(", "v", ",", "3", ")" ]
Convert an RGB color representation to an HSV color representation. (r, g, b) :: r -> [0, 255] g -> [0, 255] b -> [0, 255] :param rgb: A tuple of three numeric values corresponding to the red, green, and blue value. :return: HSV representation of the input RGB value. :rtype: tuple
[ "Convert", "an", "RGB", "color", "representation", "to", "an", "HSV", "color", "representation", "." ]
python
valid
22.361111
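A usage sketch for the record above, assuming the colorutils package is installed; the import path follows the record's repo and path fields, and the expected values are worked out from the formula in the docstring.

from colorutils.convert import rgb_to_hsv

# Pure red: _max = r, delta = 1, so h = 0, s = 1, v = 1.
assert rgb_to_hsv((255, 0, 0)) == (0.0, 1.0, 1.0)
# Pure green: h = 60 * (((b - r) / delta) + 2) = 120.
assert rgb_to_hsv((0, 255, 0)) == (120.0, 1.0, 1.0)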
openego/eDisGo
edisgo/data/import_data.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/data/import_data.py#L1948-L1980
def _build_generator_list(network): """Builds DataFrames with all generators in MV and LV grids Returns ------- :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to MV generators :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to LV generators :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to aggregated LV generators """ genos_mv = pd.DataFrame(columns= ('id', 'obj')) genos_lv = pd.DataFrame(columns= ('id', 'obj')) genos_lv_agg = pd.DataFrame(columns= ('la_id', 'id', 'obj')) # MV genos for geno in network.mv_grid.graph.nodes_by_attribute('generator'): genos_mv.loc[len(genos_mv)] = [int(geno.id), geno] for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'): la_id = int(geno.id.split('-')[1].split('_')[-1]) genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno] # LV genos for lv_grid in network.mv_grid.lv_grids: for geno in lv_grid.generators: genos_lv.loc[len(genos_lv)] = [int(geno.id), geno] return genos_mv, genos_lv, genos_lv_agg
[ "def", "_build_generator_list", "(", "network", ")", ":", "genos_mv", "=", "pd", ".", "DataFrame", "(", "columns", "=", "(", "'id'", ",", "'obj'", ")", ")", "genos_lv", "=", "pd", ".", "DataFrame", "(", "columns", "=", "(", "'id'", ",", "'obj'", ")", ")", "genos_lv_agg", "=", "pd", ".", "DataFrame", "(", "columns", "=", "(", "'la_id'", ",", "'id'", ",", "'obj'", ")", ")", "# MV genos", "for", "geno", "in", "network", ".", "mv_grid", ".", "graph", ".", "nodes_by_attribute", "(", "'generator'", ")", ":", "genos_mv", ".", "loc", "[", "len", "(", "genos_mv", ")", "]", "=", "[", "int", "(", "geno", ".", "id", ")", ",", "geno", "]", "for", "geno", "in", "network", ".", "mv_grid", ".", "graph", ".", "nodes_by_attribute", "(", "'generator_aggr'", ")", ":", "la_id", "=", "int", "(", "geno", ".", "id", ".", "split", "(", "'-'", ")", "[", "1", "]", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", ")", "genos_lv_agg", ".", "loc", "[", "len", "(", "genos_lv_agg", ")", "]", "=", "[", "la_id", ",", "geno", ".", "id", ",", "geno", "]", "# LV genos", "for", "lv_grid", "in", "network", ".", "mv_grid", ".", "lv_grids", ":", "for", "geno", "in", "lv_grid", ".", "generators", ":", "genos_lv", ".", "loc", "[", "len", "(", "genos_lv", ")", "]", "=", "[", "int", "(", "geno", ".", "id", ")", ",", "geno", "]", "return", "genos_mv", ",", "genos_lv", ",", "genos_lv_agg" ]
Builds DataFrames with all generators in MV and LV grids Returns ------- :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to MV generators :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to LV generators :pandas:`pandas.DataFrame<dataframe>` A DataFrame with id of and reference to aggregated LV generators
[ "Builds", "DataFrames", "with", "all", "generators", "in", "MV", "and", "LV", "grids" ]
python
train
38.272727
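A standalone sketch of the row-append idiom _build_generator_list relies on: assigning to df.loc[len(df)] on a DataFrame with a default RangeIndex appends one row. The generator ids and objects below are made up for the demo.

import pandas as pd

genos = pd.DataFrame(columns=('id', 'obj'))
for gid, obj in [(1, 'gen_a'), (2, 'gen_b')]:  # hypothetical generators
    genos.loc[len(genos)] = [gid, obj]         # append one row per generator
assert list(genos['id']) == [1, 2]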
UCBerkeleySETI/blimpy
blimpy/match_fils.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/match_fils.py#L46-L142
def cmd_tool(args=None): """ Command line tool to make a md5sum comparison of two .fil files. """ if 'bl' in local_host: header_loc = '/usr/local/sigproc/bin/header' #Current location of header command in GBT. else: raise IOError('Script only able to run in BL systems.') p = OptionParser() p.set_usage('matchfils <FIL_FILE1> <FIL_FILE2>') opts, args = p.parse_args(sys.argv[1:]) file1 = args[0] file2 = args[1] #------------------------------------ #Create batch script make_batch_script() #------------------------------------ #First checksum headersize1 = find_header_size(file1) file_size1 = os.path.getsize(file1) #Strip header from file, and calculate the md5sum of the rest. #command=['tail','-c',str(file_size1-headersize1),file1,'|','md5sum'] command=['./tail_sum.sh',file1,str(file_size1-headersize1)] print('[matchfils] '+' '.join(command)) proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() check_sum1 = out.split()[0] print('[matchfils] Checksum is:', check_sum1) if err: raise Error('There is an error.') #--- out,err = reset_outs() command=[header_loc,file1] print('[matchfils] Header information:') proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() header1 = out print(header1) #------------------------------------ #Second checksum out,err = reset_outs() headersize2 = find_header_size(file2) file_size2 = os.path.getsize(file2) #Strip header from file, and calculate the md5sum of the rest. command=['./tail_sum.sh',file2,str(file_size2-headersize2)] print('[matchfils] '+' '.join(command)) proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() check_sum2 = out.split()[0] print('[matchfils] Checksum is:', check_sum2) if err: raise Error('There is an error.') #--- out,err = reset_outs() command=[header_loc,file2] print('[matchfils] Header information:') proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = proc.communicate() header2 = out print(header2) #------------------------------------ #check the checksums if check_sum1 != check_sum2: print('[matchfils] Booo! Checksum does not match between files.') else: print('[matchfils] Hooray! Checksum matches between files.') #------------------------------------ #Remove batch script os.remove('tail_sum.sh')
[ "def", "cmd_tool", "(", "args", "=", "None", ")", ":", "if", "'bl'", "in", "local_host", ":", "header_loc", "=", "'/usr/local/sigproc/bin/header'", "#Current location of header command in GBT.", "else", ":", "raise", "IOError", "(", "'Script only able to run in BL systems.'", ")", "p", "=", "OptionParser", "(", ")", "p", ".", "set_usage", "(", "'matchfils <FIL_FILE1> <FIL_FILE2>'", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "file1", "=", "args", "[", "0", "]", "file2", "=", "args", "[", "1", "]", "#------------------------------------", "#Create batch script", "make_batch_script", "(", ")", "#------------------------------------", "#First checksum", "headersize1", "=", "find_header_size", "(", "file1", ")", "file_size1", "=", "os", ".", "path", ".", "getsize", "(", "file1", ")", "#Strip header from file, and calculate the md5sum of the rest.", "#command=['tail','-c',str(file_size1-headersize1),file1,'|','md5sum']", "command", "=", "[", "'./tail_sum.sh'", ",", "file1", ",", "str", "(", "file_size1", "-", "headersize1", ")", "]", "print", "(", "'[matchfils] '", "+", "' '", ".", "join", "(", "command", ")", ")", "proc", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "check_sum1", "=", "out", ".", "split", "(", ")", "[", "0", "]", "print", "(", "'[matchfils] Checksum is:'", ",", "check_sum1", ")", "if", "err", ":", "raise", "Error", "(", "'There is an error.'", ")", "#---", "out", ",", "err", "=", "reset_outs", "(", ")", "command", "=", "[", "header_loc", ",", "file1", "]", "print", "(", "'[matchfils] Header information:'", ")", "proc", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "header1", "=", "out", "print", "(", "header1", ")", "#------------------------------------", "#Second checksum", "out", ",", "err", "=", "reset_outs", "(", ")", "headersize2", "=", "find_header_size", "(", "file2", ")", "file_size2", "=", "os", ".", "path", ".", "getsize", "(", "file2", ")", "#Strip header from file, and calculate the md5sum of the rest.", "command", "=", "[", "'./tail_sum.sh'", ",", "file2", ",", "str", "(", "file_size2", "-", "headersize2", ")", "]", "print", "(", "'[matchfils] '", "+", "' '", ".", "join", "(", "command", ")", ")", "proc", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "check_sum2", "=", "out", ".", "split", "(", ")", "[", "0", "]", "print", "(", "'[matchfils] Checksum is:'", ",", "check_sum2", ")", "if", "err", ":", "raise", "Error", "(", "'There is an error.'", ")", "#---", "out", ",", "err", "=", "reset_outs", "(", ")", "command", "=", "[", "header_loc", ",", "file2", "]", "print", "(", "'[matchfils] Header information:'", ")", "proc", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "out", ",", "err", ")", "=", "proc", ".", "communicate", "(", ")", "header2", "=", "out", "print", "(", "header2", ")", "#------------------------------------", "#check the checksums", "if", "check_sum1", "!=", 
"check_sum2", ":", "print", "(", "'[matchfils] Booo! Checksum does not match between files.'", ")", "else", ":", "print", "(", "'[matchfils] Hooray! Checksum matches between files.'", ")", "#------------------------------------", "#Remove batch script", "os", ".", "remove", "(", "'tail_sum.sh'", ")" ]
Command line tool to make a md5sum comparison of two .fil files.
[ "Command", "line", "tool", "to", "make", "a", "md5sum", "comparison", "of", "two", ".", "fil", "files", "." ]
python
test
27.082474
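A pure-Python sketch of what the generated tail_sum.sh pipeline in cmd_tool computes: the md5 of a file with its first headersize bytes stripped. The file contents and header size below are made up for the demo.

import hashlib, os, tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'HEADERpayload')
headersize = len(b'HEADER')
with open(f.name, 'rb') as fh:
    fh.seek(headersize)                          # skip the header bytes
    digest = hashlib.md5(fh.read()).hexdigest()  # md5 of the remainder
os.remove(f.name)
assert digest == hashlib.md5(b'payload').hexdigest()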
NiklasRosenstein-Python/nr-deprecated
nr/concurrency.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/concurrency.py#L95-L144
def synchronized(obj): """ This function has two purposes: 1. Decorate a function that automatically synchronizes access to the object passed as the first argument (usually `self`, for member methods) 2. Synchronize access to the object, used in a `with`-statement. Note that you can use #wait(), #notify() and #notify_all() only on synchronized objects. # Example ```python class Box(Synchronizable): def __init__(self): self.value = None @synchronized def get(self): return self.value @synchronized def set(self, value): self.value = value box = Box() box.set('foobar') with synchronized(box): box.value = 'taz\'dingo' print(box.get()) ``` # Arguments obj (Synchronizable, function): The object to synchronize access to, or a function to decorate. # Returns 1. The decorated function. 2. The value of `obj.synchronizable_condition`, which should implement the context-manager interface (to be used in a `with`-statement). """ if hasattr(obj, 'synchronizable_condition'): return obj.synchronizable_condition elif callable(obj): @functools.wraps(obj) def wrapper(self, *args, **kwargs): with self.synchronizable_condition: return obj(self, *args, **kwargs) return wrapper else: raise TypeError('expected Synchronizable instance or callable to decorate')
[ "def", "synchronized", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'synchronizable_condition'", ")", ":", "return", "obj", ".", "synchronizable_condition", "elif", "callable", "(", "obj", ")", ":", "@", "functools", ".", "wraps", "(", "obj", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "synchronizable_condition", ":", "return", "obj", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "else", ":", "raise", "TypeError", "(", "'expected Synchronizable instance or callable to decorate'", ")" ]
This function has two purposes: 1. Decorate a function that automatically synchronizes access to the object passed as the first argument (usually `self`, for member methods) 2. Synchronize access to the object, used in a `with`-statement. Note that you can use #wait(), #notify() and #notify_all() only on synchronized objects. # Example ```python class Box(Synchronizable): def __init__(self): self.value = None @synchronized def get(self): return self.value @synchronized def set(self, value): self.value = value box = Box() box.set('foobar') with synchronized(box): box.value = 'taz\'dingo' print(box.get()) ``` # Arguments obj (Synchronizable, function): The object to synchronize access to, or a function to decorate. # Returns 1. The decorated function. 2. The value of `obj.synchronizable_condition`, which should implement the context-manager interface (to be used in a `with`-statement).
[ "This", "function", "has", "two", "purposes", ":" ]
python
train
26.94
funilrys/PyFunceble
PyFunceble/helpers.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/helpers.py#L126-L149
def _hash_file(self, algo): """Get the hash of the given file :param algo: The algorithm to use. :type algo: str :return: The hexdigest of the data. :rtype: str """ # We get the algorithm function. hash_data = getattr(hashlib, algo)() with open(self.path, "rb") as file: # We open and read the parsed path. # We read the content. content = file.read() # We pass the content to the hash algorithm. hash_data.update(content) # And we extract and return the hash. return hash_data.hexdigest()
[ "def", "_hash_file", "(", "self", ",", "algo", ")", ":", "# We get the algorithm function.", "hash_data", "=", "getattr", "(", "hashlib", ",", "algo", ")", "(", ")", "with", "open", "(", "self", ".", "path", ",", "\"rb\"", ")", "as", "file", ":", "# We open and read the parsed path.", "# We read the content.", "content", "=", "file", ".", "read", "(", ")", "# We pass the content to the hash algorithm.", "hash_data", ".", "update", "(", "content", ")", "# And we extract and return the hash.", "return", "hash_data", ".", "hexdigest", "(", ")" ]
Get the hash of the given file :param algo: The algorithm to use. :type algo: str :return: The hexdigest of the data. :rtype: str
[ "Get", "the", "hash", "of", "the", "given", "file" ]
python
test
25.875
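A standalone sketch of the dynamic dispatch _hash_file uses: hashlib exposes its constructors by name, so getattr(hashlib, algo)() selects one at runtime.

import hashlib

def hash_bytes(data, algo='sha256'):
    hash_data = getattr(hashlib, algo)()  # e.g. hashlib.sha256()
    hash_data.update(data)
    return hash_data.hexdigest()

assert hash_bytes(b'abc', 'md5') == hashlib.md5(b'abc').hexdigest()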
bitesofcode/projex
projex/urls.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/urls.py#L13-L63
def build(path, query=None, fragment=''): """ Generates a URL based on the inputted path and given query options and fragment. The query should be a dictionary of terms that will be generated into the URL, while the fragment is the anchor point within the target path that will be navigated to. If there are any wildcards within the path that are found within the query, they will be inserted into the path itself and removed from the query string. :example |>>> import skyline.gui |>>> skyline.gui.build_url('sky://projects/%(project)s', | {'project': 'Test', 'asset': 'Bob'}) |'sky://projects/Test/?asset=Bob' :param path | <str> query | <dict> || None fragment | <str> || None :return <str> | url """ url = nstr(path) # replace the optional arguments in the url keys = projex.text.findkeys(path) if keys: if query is None: query = {} opts = {} for key in keys: opts[key] = query.pop(key, '%({})s'.format(key)) url %= opts # add the query if query: if type(query) is dict: mapped_query = {} for key, value in query.items(): mapped_query[nstr(key)] = nstr(value) query_str = urllib.urlencode(mapped_query) else: query_str = nstr(query) url += '?' + query_str # include the fragment if fragment: url += '#' + fragment return url
[ "def", "build", "(", "path", ",", "query", "=", "None", ",", "fragment", "=", "''", ")", ":", "url", "=", "nstr", "(", "path", ")", "# replace the optional arguments in the url", "keys", "=", "projex", ".", "text", ".", "findkeys", "(", "path", ")", "if", "keys", ":", "if", "query", "is", "None", ":", "query", "=", "{", "}", "opts", "=", "{", "}", "for", "key", "in", "keys", ":", "opts", "[", "key", "]", "=", "query", ".", "pop", "(", "key", ",", "'%({})s'", ".", "format", "(", "key", ")", ")", "url", "%=", "opts", "# add the query", "if", "query", ":", "if", "type", "(", "query", ")", "is", "dict", ":", "mapped_query", "=", "{", "}", "for", "key", ",", "value", "in", "query", ".", "items", "(", ")", ":", "mapped_query", "[", "nstr", "(", "key", ")", "]", "=", "nstr", "(", "value", ")", "query_str", "=", "urllib", ".", "urlencode", "(", "mapped_query", ")", "else", ":", "query_str", "=", "nstr", "(", "query", ")", "url", "+=", "'?'", "+", "query_str", "# include the fragment", "if", "fragment", ":", "url", "+=", "'#'", "+", "fragment", "return", "url" ]
Generates a URL based on the inputted path and given query options and fragment. The query should be a dictionary of terms that will be generated into the URL, while the fragment is the anchor point within the target path that will be navigated to. If there are any wildcards within the path that are found within the query, they will be inserted into the path itself and removed from the query string. :example |>>> import skyline.gui |>>> skyline.gui.build_url('sky://projects/%(project)s', | {'project': 'Test', 'asset': 'Bob'}) |'sky://projects/Test/?asset=Bob' :param path | <str> query | <dict> || None fragment | <str> || None :return <str> | url
[ "Generates", "a", "URL", "based", "on", "the", "inputted", "path", "and", "given", "query", "options", "and", "fragment", ".", "The", "query", "should", "be", "a", "dictionary", "of", "terms", "that", "will", "be", "generated", "into", "the", "URL", "while", "the", "fragment", "is", "the", "anchor", "point", "within", "the", "target", "path", "that", "will", "be", "navigated", "to", ".", "If", "there", "are", "any", "wildcards", "within", "the", "path", "that", "are", "found", "within", "the", "query", "they", "will", "be", "inserted", "into", "the", "path", "itself", "and", "removed", "from", "the", "query", "string", ".", ":", "example", "|", ">>>", "import", "skyline", ".", "gui", "|", ">>>", "skyline", ".", "gui", ".", "build_url", "(", "sky", ":", "//", "projects", "/", "%", "(", "project", ")", "s", "|", "{", "project", ":", "Test", "asset", ":", "Bob", "}", ")", "|", "sky", ":", "//", "projects", "/", "Test", "/", "?asset", "=", "Bob", ":", "param", "path", "|", "<str", ">", "query", "|", "<dict", ">", "||", "None", "fragment", "|", "<str", ">", "||", "None", ":", "return", "<str", ">", "|", "url" ]
python
train
30.72549
RedHatInsights/insights-core
insights/core/dr.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L745-L767
def add_observer(self, o, component_type=ComponentType): """ Add a callback that will get invoked after each component is called. Args: o (func): the callback function Keyword Args: component_type (ComponentType): the :class:`ComponentType` to observe. The callback will fire any time an instance of the class or its subclasses is invoked. The callback should look like this: .. code-block:: python def callback(comp, broker): value = broker.get(comp) # do something with value pass """ self.observers[component_type].add(o)
[ "def", "add_observer", "(", "self", ",", "o", ",", "component_type", "=", "ComponentType", ")", ":", "self", ".", "observers", "[", "component_type", "]", ".", "add", "(", "o", ")" ]
Add a callback that will get invoked after each component is called. Args: o (func): the callback function Keyword Args: component_type (ComponentType): the :class:`ComponentType` to observe. The callback will fire any time an instance of the class or its subclasses is invoked. The callback should look like this: .. code-block:: python def callback(comp, broker): value = broker.get(comp) # do something with value pass
[ "Add", "a", "callback", "that", "will", "get", "invoked", "after", "each", "component", "is", "called", "." ]
python
train
29.913043
bcbio/bcbio-nextgen
bcbio/graph/graph.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L89-L99
def this_and_prev(iterable): """Walk an iterable, returning the current and previous items as a two-tuple.""" try: item = next(iterable) while True: next_item = next(iterable) yield item, next_item item = next_item except StopIteration: return
[ "def", "this_and_prev", "(", "iterable", ")", ":", "try", ":", "item", "=", "next", "(", "iterable", ")", "while", "True", ":", "next_item", "=", "next", "(", "iterable", ")", "yield", "item", ",", "next_item", "item", "=", "next_item", "except", "StopIteration", ":", "return" ]
Walk an iterable, returning the current and previous items as a two-tuple.
[ "Walk", "an", "iterable", "returning", "the", "current", "and", "previous", "items", "as", "a", "two", "-", "tuple", "." ]
python
train
28.090909
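A quick demo of the generator above, with the body reproduced verbatim from the record. Note that it calls next() on its argument directly, so it must be given an iterator rather than a bare list.

def this_and_prev(iterable):
    try:
        item = next(iterable)
        while True:
            next_item = next(iterable)
            yield item, next_item
            item = next_item
    except StopIteration:
        return

assert list(this_and_prev(iter([1, 2, 3, 4]))) == [(1, 2), (2, 3), (3, 4)]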
Dallinger/Dallinger
dallinger/command_line.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/command_line.py#L740-L753
def monitor(app): """Set up application monitoring.""" heroku_app = HerokuApp(dallinger_uid=app) webbrowser.open(heroku_app.dashboard_url) webbrowser.open("https://requester.mturk.com/mturk/manageHITs") heroku_app.open_logs() check_call(["open", heroku_app.db_uri]) while _keep_running(): summary = get_summary(app) click.clear() click.echo(header) click.echo("\nExperiment {}\n".format(app)) click.echo(summary) time.sleep(10)
[ "def", "monitor", "(", "app", ")", ":", "heroku_app", "=", "HerokuApp", "(", "dallinger_uid", "=", "app", ")", "webbrowser", ".", "open", "(", "heroku_app", ".", "dashboard_url", ")", "webbrowser", ".", "open", "(", "\"https://requester.mturk.com/mturk/manageHITs\"", ")", "heroku_app", ".", "open_logs", "(", ")", "check_call", "(", "[", "\"open\"", ",", "heroku_app", ".", "db_uri", "]", ")", "while", "_keep_running", "(", ")", ":", "summary", "=", "get_summary", "(", "app", ")", "click", ".", "clear", "(", ")", "click", ".", "echo", "(", "header", ")", "click", ".", "echo", "(", "\"\\nExperiment {}\\n\"", ".", "format", "(", "app", ")", ")", "click", ".", "echo", "(", "summary", ")", "time", ".", "sleep", "(", "10", ")" ]
Set up application monitoring.
[ "Set", "up", "application", "monitoring", "." ]
python
train
35
loli/medpy
medpy/graphcut/energy_voxel.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/energy_voxel.py#L227-L288
def boundary_difference_exponential(graph, xxx_todo_changeme4): r""" Boundary term processing adjacent voxels difference value using an exponential relationship. An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. Finds all edges between all neighbours of the image and uses their difference in intensity values as edge weight. The weights are normalized using an exponential function and a smoothing factor :math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its ideal settings differ greatly from application to application. The weights between two neighbouring voxels :math:`(p, q)` is then computed as .. math:: w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}} , for which :math:`w(p, q) \in (0, 1]` holds true. When the created edge weights should be weighted according to the slice distance, provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights computed for the corresponding direction are divided by the respective slice thickness. Set this parameter to `False` for equally weighted edges. Parameters ---------- graph : GCGraph The graph to add the weights to. original_image : ndarray The original image. sigma : float The sigma parameter to use in the boundary term. spacing : sequence of float or False A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. Notes ----- This function requires the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the original image. """ (original_image, sigma, spacing) = xxx_todo_changeme4 original_image = scipy.asarray(original_image) def boundary_term_exponential(intensities): """ Implementation of a exponential boundary term computation over an array. """ # apply exp-(x**2/sigma**2) intensities = scipy.power(intensities, 2) intensities /= math.pow(sigma, 2) intensities *= -1 intensities = scipy.exp(intensities) intensities[intensities <= 0] = sys.float_info.min return intensities __skeleton_difference(graph, original_image, boundary_term_exponential, spacing)
[ "def", "boundary_difference_exponential", "(", "graph", ",", "xxx_todo_changeme4", ")", ":", "(", "original_image", ",", "sigma", ",", "spacing", ")", "=", "xxx_todo_changeme4", "original_image", "=", "scipy", ".", "asarray", "(", "original_image", ")", "def", "boundary_term_exponential", "(", "intensities", ")", ":", "\"\"\"\n Implementation of a exponential boundary term computation over an array.\n \"\"\"", "# apply exp-(x**2/sigma**2)", "intensities", "=", "scipy", ".", "power", "(", "intensities", ",", "2", ")", "intensities", "/=", "math", ".", "pow", "(", "sigma", ",", "2", ")", "intensities", "*=", "-", "1", "intensities", "=", "scipy", ".", "exp", "(", "intensities", ")", "intensities", "[", "intensities", "<=", "0", "]", "=", "sys", ".", "float_info", ".", "min", "return", "intensities", "__skeleton_difference", "(", "graph", ",", "original_image", ",", "boundary_term_exponential", ",", "spacing", ")" ]
r""" Boundary term processing adjacent voxels difference value using an exponential relationship. An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. Finds all edges between all neighbours of the image and uses their difference in intensity values as edge weight. The weights are normalized using an exponential function and a smoothing factor :math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its ideal settings differ greatly from application to application. The weights between two neighbouring voxels :math:`(p, q)` is then computed as .. math:: w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}} , for which :math:`w(p, q) \in (0, 1]` holds true. When the created edge weights should be weighted according to the slice distance, provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights computed for the corresponding direction are divided by the respective slice thickness. Set this parameter to `False` for equally weighted edges. Parameters ---------- graph : GCGraph The graph to add the weights to. original_image : ndarray The original image. sigma : float The sigma parameter to use in the boundary term. spacing : sequence of float or False A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. Notes ----- This function requires the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the original image.
[ "r", "Boundary", "term", "processing", "adjacent", "voxels", "difference", "value", "using", "an", "exponential", "relationship", ".", "An", "implementation", "of", "a", "boundary", "term", "suitable", "to", "be", "used", "with", "the", "~medpy", ".", "graphcut", ".", "generate", ".", "graph_from_voxels", "function", ".", "Finds", "all", "edges", "between", "all", "neighbours", "of", "the", "image", "and", "uses", "their", "difference", "in", "intensity", "values", "as", "edge", "weight", ".", "The", "weights", "are", "normalized", "using", "an", "exponential", "function", "and", "a", "smoothing", "factor", ":", "math", ":", "\\", "sigma", ".", "The", ":", "math", ":", "\\", "sigma", "value", "has", "to", "be", "supplied", "manually", "since", "its", "ideal", "settings", "differ", "greatly", "from", "application", "to", "application", ".", "The", "weights", "between", "two", "neighbouring", "voxels", ":", "math", ":", "(", "p", "q", ")", "is", "then", "computed", "as", "..", "math", "::", "w", "(", "p", "q", ")", "=", "\\", "exp^", "{", "-", "\\", "frac", "{", "|I_p", "-", "I_q|^2", "}", "{", "\\", "sigma^2", "}}", "for", "which", ":", "math", ":", "w", "(", "p", "q", ")", "\\", "in", "(", "0", "1", "]", "holds", "true", ".", "When", "the", "created", "edge", "weights", "should", "be", "weighted", "according", "to", "the", "slice", "distance", "provide", "the", "list", "of", "slice", "thicknesses", "via", "the", "spacing", "parameter", ".", "Then", "all", "weights", "computed", "for", "the", "corresponding", "direction", "are", "divided", "by", "the", "respective", "slice", "thickness", ".", "Set", "this", "parameter", "to", "False", "for", "equally", "weighted", "edges", ".", "Parameters", "----------", "graph", ":", "GCGraph", "The", "graph", "to", "add", "the", "weights", "to", ".", "original_image", ":", "ndarray", "The", "original", "image", ".", "sigma", ":", "float", "The", "sigma", "parameter", "to", "use", "in", "the", "boundary", "term", ".", "spacing", ":", "sequence", "of", "float", "or", "False", "A", "sequence", "containing", "the", "slice", "spacing", "used", "for", "weighting", "the", "computed", "neighbourhood", "weight", "value", "for", "different", "dimensions", ".", "If", "False", "no", "distance", "based", "weighting", "of", "the", "graph", "edges", "is", "performed", ".", "Notes", "-----", "This", "function", "requires", "the", "original", "image", "to", "be", "passed", "along", ".", "That", "means", "that", "~medpy", ".", "graphcut", ".", "generate", ".", "graph_from_voxels", "has", "to", "be", "called", "with", "boundary_term_args", "set", "to", "the", "original", "image", "." ]
python
train
41.209677
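A worked sketch of the weight formula above, w(p,q) = exp(-|I_p - I_q|**2 / sigma**2), applied to a few hypothetical intensity differences. It mirrors the inner boundary_term_exponential helper (using numpy where the original uses scipy) without calling medpy itself.

import math
import sys

import numpy

def exponential_weights(diffs, sigma):
    w = numpy.power(diffs, 2) / math.pow(sigma, 2)
    w = numpy.exp(-w)
    w[w <= 0] = sys.float_info.min  # clamp to the smallest positive float, as the helper does
    return w

w = exponential_weights(numpy.array([0.0, 1.0, 5.0]), sigma=2.0)
assert w[0] == 1.0 and w[1] > w[2]  # identical voxels get the maximal weight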
ejeschke/ginga
ginga/misc/ModuleManager.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/ModuleManager.py#L82-L88
def get_module(self, module_name): """Return loaded module from the given name.""" try: return self.module[module_name] except KeyError: return sys.modules[module_name]
[ "def", "get_module", "(", "self", ",", "module_name", ")", ":", "try", ":", "return", "self", ".", "module", "[", "module_name", "]", "except", "KeyError", ":", "return", "sys", ".", "modules", "[", "module_name", "]" ]
Return loaded module from the given name.
[ "Return", "loaded", "module", "from", "the", "given", "name", "." ]
python
train
30.142857
mitsei/dlkit
dlkit/records/repository/vcb/vcb_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/repository/vcb/vcb_records.py#L72-L105
def _init_metadata(self): """stub""" self._start_timestamp_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'start_timestamp'), 'element_label': 'start timestamp', 'instructions': 'enter an integer number of seconds for the start time', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'syntax': 'INTEGER', 'minimum_integer': 0, 'maximum_integer': None, 'integer_set': [], 'default_integer_values': [0] } self._end_timestamp_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'end_timestamp'), 'element_label': 'end timestamp', 'instructions': 'enter an integer number of seconds for the end time', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'syntax': 'INTEGER', 'minimum_integer': 0, 'maximum_integer': None, 'integer_set': [], 'default_integer_values': [0] }
[ "def", "_init_metadata", "(", "self", ")", ":", "self", ".", "_start_timestamp_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'start_timestamp'", ")", ",", "'element_label'", ":", "'start timestamp'", ",", "'instructions'", ":", "'enter an integer number of seconds for the start time'", ",", "'required'", ":", "False", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'syntax'", ":", "'INTEGER'", ",", "'minimum_integer'", ":", "0", ",", "'maximum_integer'", ":", "None", ",", "'integer_set'", ":", "[", "]", ",", "'default_integer_values'", ":", "[", "0", "]", "}", "self", ".", "_end_timestamp_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'end_timestamp'", ")", ",", "'element_label'", ":", "'end timestamp'", ",", "'instructions'", ":", "'enter an integer number of seconds for the end time'", ",", "'required'", ":", "False", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'syntax'", ":", "'INTEGER'", ",", "'minimum_integer'", ":", "0", ",", "'maximum_integer'", ":", "None", ",", "'integer_set'", ":", "[", "]", ",", "'default_integer_values'", ":", "[", "0", "]", "}" ]
stub
[ "stub" ]
python
train
39.147059
apache/incubator-mxnet
python/mxnet/symbol/random.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/random.py#L116-L143
def poisson(lam=1, shape=_Null, dtype=_Null, **kwargs): """Draw random samples from a Poisson distribution. Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). Samples will always be returned as a floating point data type. Parameters ---------- lam : float or Symbol, optional Expectation of interval, should be >= 0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is a scalar, output shape will be `(m, n)`. If `lam` is an Symbol with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' Returns ------- Symbol If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is a scalar, output shape will be `(m, n)`. If `lam` is an Symbol with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`. """ return _random_helper(_internal._random_poisson, _internal._sample_poisson, [lam], shape, dtype, kwargs)
[ "def", "poisson", "(", "lam", "=", "1", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_internal", ".", "_random_poisson", ",", "_internal", ".", "_sample_poisson", ",", "[", "lam", "]", ",", "shape", ",", "dtype", ",", "kwargs", ")" ]
Draw random samples from a Poisson distribution. Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). Samples will always be returned as a floating point data type. Parameters ---------- lam : float or Symbol, optional Expectation of interval, should be >= 0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is a scalar, output shape will be `(m, n)`. If `lam` is an Symbol with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' Returns ------- Symbol If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is a scalar, output shape will be `(m, n)`. If `lam` is an Symbol with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
[ "Draw", "random", "samples", "from", "a", "Poisson", "distribution", "." ]
python
train
45.678571
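A usage sketch for the record above, assuming an MXNet 1.x install where this module is exposed as mx.sym.random (consistent with the record's path python/mxnet/symbol/random.py).

import mxnet as mx

# Scalar lam with shape (2, 2) -> a Symbol whose output shape is (2, 2);
# values are only drawn when the symbol is executed.
s = mx.sym.random.poisson(lam=4, shape=(2, 2))
print(type(s))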
manns/pyspread
pyspread/src/actions/_grid_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L1114-L1128
def _zoom_rows(self, zoom): """Zooms grid rows""" self.grid.SetDefaultRowSize(self.grid.std_row_size * zoom, resizeExistingRows=True) self.grid.SetRowLabelSize(self.grid.row_label_size * zoom) for row, tab in self.code_array.row_heights: if tab == self.grid.current_table and \ row < self.grid.code_array.shape[0]: base_row_width = self.code_array.row_heights[(row, tab)] if base_row_width is None: base_row_width = self.grid.GetDefaultRowSize() zoomed_row_size = base_row_width * zoom self.grid.SetRowSize(row, zoomed_row_size)
[ "def", "_zoom_rows", "(", "self", ",", "zoom", ")", ":", "self", ".", "grid", ".", "SetDefaultRowSize", "(", "self", ".", "grid", ".", "std_row_size", "*", "zoom", ",", "resizeExistingRows", "=", "True", ")", "self", ".", "grid", ".", "SetRowLabelSize", "(", "self", ".", "grid", ".", "row_label_size", "*", "zoom", ")", "for", "row", ",", "tab", "in", "self", ".", "code_array", ".", "row_heights", ":", "if", "tab", "==", "self", ".", "grid", ".", "current_table", "and", "row", "<", "self", ".", "grid", ".", "code_array", ".", "shape", "[", "0", "]", ":", "base_row_width", "=", "self", ".", "code_array", ".", "row_heights", "[", "(", "row", ",", "tab", ")", "]", "if", "base_row_width", "is", "None", ":", "base_row_width", "=", "self", ".", "grid", ".", "GetDefaultRowSize", "(", ")", "zoomed_row_size", "=", "base_row_width", "*", "zoom", "self", ".", "grid", ".", "SetRowSize", "(", "row", ",", "zoomed_row_size", ")" ]
Zooms grid rows
[ "Zooms", "grid", "rows" ]
python
train
46.333333
fastai/fastai
fastai/text/models/awd_lstm.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/models/awd_lstm.py#L165-L168
def awd_lstm_lm_split(model:nn.Module) -> List[nn.Module]: "Split a RNN `model` in groups for differential learning rates." groups = [[rnn, dp] for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)] return groups + [[model[0].encoder, model[0].encoder_dp, model[1]]]
[ "def", "awd_lstm_lm_split", "(", "model", ":", "nn", ".", "Module", ")", "->", "List", "[", "nn", ".", "Module", "]", ":", "groups", "=", "[", "[", "rnn", ",", "dp", "]", "for", "rnn", ",", "dp", "in", "zip", "(", "model", "[", "0", "]", ".", "rnns", ",", "model", "[", "0", "]", ".", "hidden_dps", ")", "]", "return", "groups", "+", "[", "[", "model", "[", "0", "]", ".", "encoder", ",", "model", "[", "0", "]", ".", "encoder_dp", ",", "model", "[", "1", "]", "]", "]" ]
Split a RNN `model` in groups for differential learning rates.
[ "Split", "a", "RNN", "model", "in", "groups", "for", "differential", "learning", "rates", "." ]
python
train
69
gbowerman/azurerm
azurerm/computerp.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L107-L193
def create_vmss(access_token, subscription_id, resource_group, vmss_name, vm_size, capacity, publisher, offer, sku, version, subnet_id, location, be_pool_id=None, lb_pool_id=None, storage_type='Standard_LRS', username='azure', password=None, public_key=None, overprovision=True, upgrade_policy='Manual', public_ip_per_vm=False): '''Create virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vm_size (str): Size of virtual machine, e.g. 'Standard_D1_v2'. capacity (int): Number of VMs in the scale set. 0-1000. publisher (str): VM image publisher. E.g. 'MicrosoftWindowsServer'. offer (str): VM image offer. E.g. 'WindowsServer'. sku (str): VM image sku. E.g. '2016-Datacenter'. version (str): VM image version. E.g. 'latest'. subnet_id (str): Resource id of a subnet. location (str): Azure data center location. E.g. westus. be_pool_id (str): Resource id of a backend NAT pool. lb_pool_id (str): Resource id of a load balancer pool. storage_type (str): Optional storage type. Default 'Standard_LRS'. username (str): Optional user name. Default is 'azure'. password (str): Optional password. Default is None (not required if using public_key). public_key (str): Optional public key. Default is None (not required if using password, e.g. on Windows). overprovision (bool): Optional. Enable overprovisioning of VMs. Default True. upgrade_policy (str): Optional. Set upgrade policy to Automatic, Manual or Rolling. Default 'Manual'. public_ip_per_vm (bool): Optional. Set public IP per VM. Default False. Returns: HTTP response. JSON body of the virtual machine scale set properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) vmss_body = {'location': location} vmss_sku = {'name': vm_size, 'tier': 'Standard', 'capacity': capacity} vmss_body['sku'] = vmss_sku properties = {'overprovision': overprovision} properties['upgradePolicy'] = {'mode': upgrade_policy} os_profile = {'computerNamePrefix': vmss_name} os_profile['adminUsername'] = username if password is not None: os_profile['adminPassword'] = password if public_key is not None: if password is None: disable_pswd = True else: disable_pswd = False linux_config = {'disablePasswordAuthentication': disable_pswd} pub_key = {'path': '/home/' + username + '/.ssh/authorized_keys'} pub_key['keyData'] = public_key linux_config['ssh'] = {'publicKeys': [pub_key]} os_profile['linuxConfiguration'] = linux_config vm_profile = {'osProfile': os_profile} os_disk = {'createOption': 'fromImage'} os_disk['managedDisk'] = {'storageAccountType': storage_type} os_disk['caching'] = 'ReadWrite' storage_profile = {'osDisk': os_disk} storage_profile['imageReference'] = \ {'publisher': publisher, 'offer': offer, 'sku': sku, 'version': version} vm_profile['storageProfile'] = storage_profile nic = {'name': vmss_name} ip_config = {'name': vmss_name} ip_properties = {'subnet': {'id': subnet_id}} if be_pool_id is not None: ip_properties['loadBalancerBackendAddressPools'] = [{'id': be_pool_id}] if lb_pool_id is not None: ip_properties['loadBalancerInboundNatPools'] = [{'id': lb_pool_id}] if public_ip_per_vm is True: ip_properties['publicIpAddressConfiguration'] = { 'name': 'pubip', 'properties': {'idleTimeoutInMinutes': 15}} ip_config['properties'] = ip_properties nic['properties'] = {'primary': True, 'ipConfigurations': [ip_config]} network_profile = {'networkInterfaceConfigurations': [nic]} vm_profile['networkProfile'] = network_profile properties['virtualMachineProfile'] = vm_profile vmss_body['properties'] = properties body = json.dumps(vmss_body) return do_put(endpoint, body, access_token)
[ "def", "create_vmss", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "vmss_name", ",", "vm_size", ",", "capacity", ",", "publisher", ",", "offer", ",", "sku", ",", "version", ",", "subnet_id", ",", "location", ",", "be_pool_id", "=", "None", ",", "lb_pool_id", "=", "None", ",", "storage_type", "=", "'Standard_LRS'", ",", "username", "=", "'azure'", ",", "password", "=", "None", ",", "public_key", "=", "None", ",", "overprovision", "=", "True", ",", "upgrade_policy", "=", "'Manual'", ",", "public_ip_per_vm", "=", "False", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "resource_group", ",", "'/providers/Microsoft.Compute/virtualMachineScaleSets/'", ",", "vmss_name", ",", "'?api-version='", ",", "COMP_API", "]", ")", "vmss_body", "=", "{", "'location'", ":", "location", "}", "vmss_sku", "=", "{", "'name'", ":", "vm_size", ",", "'tier'", ":", "'Standard'", ",", "'capacity'", ":", "capacity", "}", "vmss_body", "[", "'sku'", "]", "=", "vmss_sku", "properties", "=", "{", "'overprovision'", ":", "overprovision", "}", "properties", "[", "'upgradePolicy'", "]", "=", "{", "'mode'", ":", "upgrade_policy", "}", "os_profile", "=", "{", "'computerNamePrefix'", ":", "vmss_name", "}", "os_profile", "[", "'adminUsername'", "]", "=", "username", "if", "password", "is", "not", "None", ":", "os_profile", "[", "'adminPassword'", "]", "=", "password", "if", "public_key", "is", "not", "None", ":", "if", "password", "is", "None", ":", "disable_pswd", "=", "True", "else", ":", "disable_pswd", "=", "False", "linux_config", "=", "{", "'disablePasswordAuthentication'", ":", "disable_pswd", "}", "pub_key", "=", "{", "'path'", ":", "'/home/'", "+", "username", "+", "'/.ssh/authorized_keys'", "}", "pub_key", "[", "'keyData'", "]", "=", "public_key", "linux_config", "[", "'ssh'", "]", "=", "{", "'publicKeys'", ":", "[", "pub_key", "]", "}", "os_profile", "[", "'linuxConfiguration'", "]", "=", "linux_config", "vm_profile", "=", "{", "'osProfile'", ":", "os_profile", "}", "os_disk", "=", "{", "'createOption'", ":", "'fromImage'", "}", "os_disk", "[", "'managedDisk'", "]", "=", "{", "'storageAccountType'", ":", "storage_type", "}", "os_disk", "[", "'caching'", "]", "=", "'ReadWrite'", "storage_profile", "=", "{", "'osDisk'", ":", "os_disk", "}", "storage_profile", "[", "'imageReference'", "]", "=", "{", "'publisher'", ":", "publisher", ",", "'offer'", ":", "offer", ",", "'sku'", ":", "sku", ",", "'version'", ":", "version", "}", "vm_profile", "[", "'storageProfile'", "]", "=", "storage_profile", "nic", "=", "{", "'name'", ":", "vmss_name", "}", "ip_config", "=", "{", "'name'", ":", "vmss_name", "}", "ip_properties", "=", "{", "'subnet'", ":", "{", "'id'", ":", "subnet_id", "}", "}", "if", "be_pool_id", "is", "not", "None", ":", "ip_properties", "[", "'loadBalancerBackendAddressPools'", "]", "=", "[", "{", "'id'", ":", "be_pool_id", "}", "]", "if", "lb_pool_id", "is", "not", "None", ":", "ip_properties", "[", "'loadBalancerInboundNatPools'", "]", "=", "[", "{", "'id'", ":", "lb_pool_id", "}", "]", "if", "public_ip_per_vm", "is", "True", ":", "ip_properties", "[", "'publicIpAddressConfiguration'", "]", "=", "{", "'name'", ":", "'pubip'", ",", "'properties'", ":", "{", "'idleTimeoutInMinutes'", ":", "15", "}", "}", "ip_config", "[", "'properties'", "]", "=", "ip_properties", "nic", "[", "'properties'", "]", "=", "{", "'primary'", ":", "True", ",", "'ipConfigurations'", ":", "[", 
"ip_config", "]", "}", "network_profile", "=", "{", "'networkInterfaceConfigurations'", ":", "[", "nic", "]", "}", "vm_profile", "[", "'networkProfile'", "]", "=", "network_profile", "properties", "[", "'virtualMachineProfile'", "]", "=", "vm_profile", "vmss_body", "[", "'properties'", "]", "=", "properties", "body", "=", "json", ".", "dumps", "(", "vmss_body", ")", "return", "do_put", "(", "endpoint", ",", "body", ",", "access_token", ")" ]
Create virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vm_size (str): Size of virtual machine, e.g. 'Standard_D1_v2'. capacity (int): Number of VMs in the scale set. 0-1000. publisher (str): VM image publisher. E.g. 'MicrosoftWindowsServer'. offer (str): VM image offer. E.g. 'WindowsServer'. sku (str): VM image sku. E.g. '2016-Datacenter'. version (str): VM image version. E.g. 'latest'. subnet_id (str): Resource id of a subnet. location (str): Azure data center location. E.g. westus. be_pool_id (str): Resource id of a backend NAT pool. lb_pool_id (str): Resource id of a load balancer pool. storage_type (str): Optional storage type. Default 'Standard_LRS'. username (str): Optional user name. Default is 'azure'. password (str): Optional password. Default is None (not required if using public_key). public_key (str): Optional public key. Default is None (not required if using password, e.g. on Windows). overprovision (bool): Optional. Enable overprovisioning of VMs. Default True. upgrade_policy (str): Optional. Set upgrade policy to Automatic, Manual or Rolling. Default 'Manual'. public_ip_per_vm (bool): Optional. Set public IP per VM. Default False. Returns: HTTP response. JSON body of the virtual machine scale set properties.
[ "Create", "virtual", "machine", "scale", "set", "." ]
python
train
51.011494
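A minimal call sketch for the scale-set builder above. Every identifier below is a placeholder rather than a value from the source repo, and create_vmss is assumed to be importable from its defining module:

# All values below are placeholders -- substitute real identifiers.
access_token = '<bearer token from your auth flow>'
subnet_id = ('/subscriptions/<sub-id>/resourceGroups/demo-rg/providers/'
             'Microsoft.Network/virtualNetworks/demo-vnet/subnets/default')
# Positional arguments follow the signature shown above.
response = create_vmss(
    access_token, '<sub-id>', 'demo-rg', 'demo-vmss',
    'Standard_D1_v2', 3,
    'Canonical', 'UbuntuServer', '16.04-LTS', 'latest',
    subnet_id, 'westus',
    username='azure', password='<secret>')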
saltstack/salt
salt/pillar/pillar_ldap.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/pillar_ldap.py#L270-L306
def _do_search(conf): ''' Builds connection and search arguments, performs the LDAP search and formats the results as a dictionary appropriate for pillar use. ''' # Build LDAP connection args connargs = {} for name in ['server', 'port', 'tls', 'binddn', 'bindpw', 'anonymous']: connargs[name] = _config(name, conf) if connargs['binddn'] and connargs['bindpw']: connargs['anonymous'] = False # Build search args try: _filter = conf['filter'] except KeyError: raise SaltInvocationError('missing filter') _dn = _config('dn', conf) scope = _config('scope', conf) _lists = _config('lists', conf) or [] _attrs = _config('attrs', conf) or [] _dict_key_attr = _config('dict_key_attr', conf, 'dn') attrs = _lists + _attrs + [_dict_key_attr] if not attrs: attrs = None # Perform the search try: result = __salt__['ldap.search'](_filter, _dn, scope, attrs, **connargs)['results'] except IndexError: # we got no results for this search log.debug('LDAP search returned no results for filter %s', _filter) result = {} except Exception: log.critical( 'Failed to retrieve pillar data from LDAP:\n', exc_info=True ) return {} return result
[ "def", "_do_search", "(", "conf", ")", ":", "# Build LDAP connection args", "connargs", "=", "{", "}", "for", "name", "in", "[", "'server'", ",", "'port'", ",", "'tls'", ",", "'binddn'", ",", "'bindpw'", ",", "'anonymous'", "]", ":", "connargs", "[", "name", "]", "=", "_config", "(", "name", ",", "conf", ")", "if", "connargs", "[", "'binddn'", "]", "and", "connargs", "[", "'bindpw'", "]", ":", "connargs", "[", "'anonymous'", "]", "=", "False", "# Build search args", "try", ":", "_filter", "=", "conf", "[", "'filter'", "]", "except", "KeyError", ":", "raise", "SaltInvocationError", "(", "'missing filter'", ")", "_dn", "=", "_config", "(", "'dn'", ",", "conf", ")", "scope", "=", "_config", "(", "'scope'", ",", "conf", ")", "_lists", "=", "_config", "(", "'lists'", ",", "conf", ")", "or", "[", "]", "_attrs", "=", "_config", "(", "'attrs'", ",", "conf", ")", "or", "[", "]", "_dict_key_attr", "=", "_config", "(", "'dict_key_attr'", ",", "conf", ",", "'dn'", ")", "attrs", "=", "_lists", "+", "_attrs", "+", "[", "_dict_key_attr", "]", "if", "not", "attrs", ":", "attrs", "=", "None", "# Perform the search", "try", ":", "result", "=", "__salt__", "[", "'ldap.search'", "]", "(", "_filter", ",", "_dn", ",", "scope", ",", "attrs", ",", "*", "*", "connargs", ")", "[", "'results'", "]", "except", "IndexError", ":", "# we got no results for this search", "log", ".", "debug", "(", "'LDAP search returned no results for filter %s'", ",", "_filter", ")", "result", "=", "{", "}", "except", "Exception", ":", "log", ".", "critical", "(", "'Failed to retrieve pillar data from LDAP:\\n'", ",", "exc_info", "=", "True", ")", "return", "{", "}", "return", "result" ]
Builds connection and search arguments, performs the LDAP search and formats the results as a dictionary appropriate for pillar use.
[ "Builds", "connection", "and", "search", "arguments", "performs", "the", "LDAP", "search", "and", "formats", "the", "results", "as", "a", "dictionary", "appropriate", "for", "pillar", "use", "." ]
python
train
35.675676
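A sketch of the pillar configuration dict that _do_search consumes; the keys mirror the _config lookups above, every value is an illustrative placeholder, and a working __salt__['ldap.search'] execution module is assumed to be available:

# Illustrative pillar_ldap search config; all values are placeholders.
conf = {
    'server': 'ldap.example.com',
    'port': 389,
    'tls': False,
    'binddn': 'cn=pillar,dc=example,dc=com',
    'bindpw': '<secret>',
    'anonymous': False,
    'filter': '(objectClass=saltMinion)',   # the only required key
    'dn': 'ou=minions,dc=example,dc=com',
    'scope': 'sub',
    'lists': ['saltList'],
    'attrs': ['saltState'],
}
result = _do_search(conf)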
lesscpy/lesscpy
lesscpy/plib/block.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/plib/block.py#L199-L213
def copy_inner(self, scope): """Copy block contents (properties, inner blocks). Renames inner block from current scope. Used for mixins. args: scope (Scope): Current scope returns: list (block contents) """ if self.tokens[1]: tokens = [u.copy() if u else u for u in self.tokens[1]] out = [p for p in tokens if p] utility.rename(out, scope, Block) return out return None
[ "def", "copy_inner", "(", "self", ",", "scope", ")", ":", "if", "self", ".", "tokens", "[", "1", "]", ":", "tokens", "=", "[", "u", ".", "copy", "(", ")", "if", "u", "else", "u", "for", "u", "in", "self", ".", "tokens", "[", "1", "]", "]", "out", "=", "[", "p", "for", "p", "in", "tokens", "if", "p", "]", "utility", ".", "rename", "(", "out", ",", "scope", ",", "Block", ")", "return", "out", "return", "None" ]
Copy block contents (properties, inner blocks). Renames inner block from current scope. Used for mixins. args: scope (Scope): Current scope returns: list (block contents)
[ "Copy", "block", "contents", "(", "properties", "inner", "blocks", ")", ".", "Renames", "inner", "block", "from", "current", "scope", ".", "Used", "for", "mixins", ".", "args", ":", "scope", "(", "Scope", ")", ":", "Current", "scope", "returns", ":", "list", "(", "block", "contents", ")" ]
python
valid
32.733333
xflr6/graphviz
graphviz/files.py
https://github.com/xflr6/graphviz/blob/7376095ef1e47abad7e0b0361b6c9720b706e7a0/graphviz/files.py#L136-L160
def save(self, filename=None, directory=None): """Save the DOT source to file. Ensure the file ends with a newline. Args: filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``) directory: (Sub)directory for source saving and rendering. Returns: The (possibly relative) path of the saved source file. """ if filename is not None: self.filename = filename if directory is not None: self.directory = directory filepath = self.filepath tools.mkdirs(filepath) data = text_type(self.source) with io.open(filepath, 'w', encoding=self.encoding) as fd: fd.write(data) if not data.endswith(u'\n'): fd.write(u'\n') return filepath
[ "def", "save", "(", "self", ",", "filename", "=", "None", ",", "directory", "=", "None", ")", ":", "if", "filename", "is", "not", "None", ":", "self", ".", "filename", "=", "filename", "if", "directory", "is", "not", "None", ":", "self", ".", "directory", "=", "directory", "filepath", "=", "self", ".", "filepath", "tools", ".", "mkdirs", "(", "filepath", ")", "data", "=", "text_type", "(", "self", ".", "source", ")", "with", "io", ".", "open", "(", "filepath", ",", "'w'", ",", "encoding", "=", "self", ".", "encoding", ")", "as", "fd", ":", "fd", ".", "write", "(", "data", ")", "if", "not", "data", ".", "endswith", "(", "u'\\n'", ")", ":", "fd", ".", "write", "(", "u'\\n'", ")", "return", "filepath" ]
Save the DOT source to file. Ensure the file ends with a newline. Args: filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``) directory: (Sub)directory for source saving and rendering. Returns: The (possibly relative) path of the saved source file.
[ "Save", "the", "DOT", "source", "to", "file", ".", "Ensure", "the", "file", "ends", "with", "a", "newline", "." ]
python
train
32.36
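A short usage sketch built on the public graphviz API; the graph content and the target directory are illustrative:

import graphviz

dot = graphviz.Digraph(name='demo')
dot.edge('parse', 'render')
# save() returns the path of the written source file.
path = dot.save(filename='demo.gv', directory='/tmp')  # -> '/tmp/demo.gv'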
PythonCharmers/python-future
src/future/backports/email/quoprimime.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/quoprimime.py#L167-L170
def newline(self): """Write eol, then start new line.""" self.write_str(self.eol) self.room = self.maxlinelen
[ "def", "newline", "(", "self", ")", ":", "self", ".", "write_str", "(", "self", ".", "eol", ")", "self", ".", "room", "=", "self", ".", "maxlinelen" ]
Write eol, then start new line.
[ "Write", "eol", "then", "start", "new", "line", "." ]
python
train
32.5
vatlab/SoS
src/sos/converter.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/converter.py#L111-L163
def script_to_html(script_file, html_file, args=None, unknown_args=None):
    '''
    Convert sos file to html format with syntax highlighting, and
    either save the output to an HTML file or view it in a browser.
    This converter accepts additional parameters --style for pygments
    styles, --linenos for displaying line numbers, and a parameter
    --raw to embed a URL to the raw sos file.
    '''
    from jinja2 import Environment, PackageLoader, select_autoescape
    environment = Environment(
        loader=PackageLoader('sos', 'templates'),
        autoescape=select_autoescape(['html', 'xml']))
    template = environment.get_template(
        args.template if args and hasattr(args, 'template') and args.template
        else 'sos_script.tpl')

    with open(script_file) as script:
        content = script.read()
    # for backward compatibility
    if args and hasattr(args, 'raw'):
        args.url = args.raw
    context = {
        'filename': script_file,
        'basename': os.path.basename(script_file),
        'script': content,
        'sos_version': __version__,
        'linenos': args.linenos if args and hasattr(args, 'linenos') else True,
        'url': args.url if args and hasattr(args, 'url') else '',
        'theme': args.style if args and hasattr(args, 'style') else 'default',
    }
    html_content = template.render(context)
    if html_file is None:
        if args and args.view:
            # write to a temp file
            import tempfile
            html_file = tempfile.NamedTemporaryFile(
                delete=False, suffix='.html').name
            with open(html_file, 'w') as out:
                out.write(html_content)
        else:
            sys.stdout.write(html_content)
    else:
        with open(html_file, 'w') as out:
            out.write(html_content)
        env.logger.info(f'SoS script saved to {html_file}')
    #
    if args and args.view:
        import webbrowser
        url = f'file://{os.path.abspath(html_file)}'
        env.logger.info(f'Viewing {url} in a browser')
        webbrowser.open(url, new=2)
        # in case the html file is temporary, give the browser some time to load it
        time.sleep(2)
[ "def", "script_to_html", "(", "script_file", ",", "html_file", ",", "args", "=", "None", ",", "unknown_args", "=", "None", ")", ":", "from", "jinja2", "import", "Environment", ",", "PackageLoader", ",", "select_autoescape", "environment", "=", "Environment", "(", "loader", "=", "PackageLoader", "(", "'sos'", ",", "'templates'", ")", ",", "autoescape", "=", "select_autoescape", "(", "[", "'html'", ",", "'xml'", "]", ")", ")", "template", "=", "environment", ".", "get_template", "(", "args", ".", "template", "if", "args", "and", "hasattr", "(", "args", ",", "'template'", ")", "and", "args", ".", "template", "else", "'sos_script.tpl'", ")", "with", "open", "(", "script_file", ")", "as", "script", ":", "content", "=", "script", ".", "read", "(", ")", "# for backward compatibility", "if", "args", "and", "hasattr", "(", "args", ",", "'raw'", ")", ":", "args", ".", "url", "=", "args", ".", "raw", "context", "=", "{", "'filename'", ":", "script_file", ",", "'basename'", ":", "os", ".", "path", ".", "basename", "(", "script_file", ")", ",", "'script'", ":", "content", ",", "'sos_version'", ":", "__version__", ",", "'linenos'", ":", "args", ".", "linenos", "if", "args", "and", "hasattr", "(", "args", ",", "'linenos'", ")", "else", "True", ",", "'url'", ":", "args", ".", "url", "if", "args", "and", "hasattr", "(", "args", ",", "'url'", ")", "else", "''", ",", "'theme'", ":", "args", ".", "style", "if", "args", "and", "hasattr", "(", "args", ",", "'style'", ")", "else", "'default'", ",", "}", "html_content", "=", "template", ".", "render", "(", "context", ")", "if", "html_file", "is", "None", ":", "if", "args", "and", "args", ".", "view", ":", "# write to a temp file", "import", "tempfile", "html_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "suffix", "=", "'.html'", ")", ".", "name", "with", "open", "(", "html_file", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "html_content", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "html_content", ")", "else", ":", "with", "open", "(", "html_file", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "html_content", ")", "env", ".", "logger", ".", "info", "(", "f'SoS script saved to {html_file}'", ")", "#", "if", "args", "and", "args", ".", "view", ":", "import", "webbrowser", "url", "=", "f'file://{os.path.abspath(html_file)}'", "env", ".", "logger", ".", "info", "(", "f'Viewing {url} in a browser'", ")", "webbrowser", ".", "open", "(", "url", ",", "new", "=", "2", ")", "# in case the html file is temporary, give the browser sometime to load it", "time", ".", "sleep", "(", "2", ")" ]
Convert sos file to html format with syntax highlighting, and either save the output to an HTML file or view it in a browser. This converter accepts additional parameters --style for pygments styles, --linenos for displaying line numbers, and a parameter --raw to embed a URL to the raw sos file.
[ "Convert", "sos", "file", "to", "html", "format", "with", "syntax", "highlighting", "and", "either", "save", "the", "output", "to", "an", "HTML", "file", "or", "view", "it", "in", "a", "browser", ".", "This", "converter", "accepts", "additional", "parameters", "--", "style", "for", "pygments", "styles", "--", "linenos", "for", "displaying", "line", "numbers", "and", "a", "parameter", "--", "raw", "to", "embed", "a", "URL", "to", "the", "raw", "sos", "file", "." ]
python
train
40.528302
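Since the converter only probes args with hasattr, a plain argparse.Namespace is enough to drive it; the attribute names below mirror those probes, and both file names are placeholders:

from argparse import Namespace

# Attribute names match the hasattr() checks in script_to_html.
args = Namespace(template=None, raw='', url='', linenos=True,
                 style='default', view=False)
script_to_html('analysis.sos', 'analysis.html', args=args)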
hyperledger/indy-plenum
stp_core/loop/eventually.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/loop/eventually.py#L50-L113
async def eventuallyAll(*coroFuncs: FlexFunc, # (use functools.partials if needed) totalTimeout: float, retryWait: float=0.1, acceptableExceptions=None, acceptableFails: int=0, override_timeout_limit=False): # TODO: Bug when `acceptableFails` > 0 if the first check fails, it will # exhaust the entire timeout. """ :param coroFuncs: iterable of no-arg functions :param totalTimeout: :param retryWait: :param acceptableExceptions: :param acceptableFails: how many of the passed in coroutines can ultimately fail and still be ok :return: """ start = time.perf_counter() def remaining(): return totalTimeout + start - time.perf_counter() funcNames = [] others = 0 fails = 0 rem = None for cf in coroFuncs: if len(funcNames) < 2: funcNames.append(get_func_name(cf)) else: others += 1 # noinspection PyBroadException try: rem = remaining() if rem <= 0: break await eventually(cf, retryWait=retryWait, timeout=rem, acceptableExceptions=acceptableExceptions, verbose=True, override_timeout_limit=override_timeout_limit) except Exception as ex: if acceptableExceptions and type(ex) not in acceptableExceptions: raise fails += 1 logger.debug("a coro {} with args {} timed out without succeeding; fail count: " "{}, acceptable: {}". format(get_func_name(cf), get_func_args(cf), fails, acceptableFails)) if fails > acceptableFails: raise if rem is not None and rem <= 0: fails += 1 if fails > acceptableFails: err = 'All checks could not complete successfully since total timeout ' \ 'expired {} sec ago'.format(-1 * rem if rem < 0 else 0) raise EventuallyTimeoutException(err) if others: funcNames.append("and {} others".format(others)) desc = ", ".join(funcNames) logger.debug("{} succeeded with {:.2f} seconds to spare". format(desc, remaining()))
[ "async", "def", "eventuallyAll", "(", "*", "coroFuncs", ":", "FlexFunc", ",", "# (use functools.partials if needed)", "totalTimeout", ":", "float", ",", "retryWait", ":", "float", "=", "0.1", ",", "acceptableExceptions", "=", "None", ",", "acceptableFails", ":", "int", "=", "0", ",", "override_timeout_limit", "=", "False", ")", ":", "# TODO: Bug when `acceptableFails` > 0 if the first check fails, it will", "# exhaust the entire timeout.", "start", "=", "time", ".", "perf_counter", "(", ")", "def", "remaining", "(", ")", ":", "return", "totalTimeout", "+", "start", "-", "time", ".", "perf_counter", "(", ")", "funcNames", "=", "[", "]", "others", "=", "0", "fails", "=", "0", "rem", "=", "None", "for", "cf", "in", "coroFuncs", ":", "if", "len", "(", "funcNames", ")", "<", "2", ":", "funcNames", ".", "append", "(", "get_func_name", "(", "cf", ")", ")", "else", ":", "others", "+=", "1", "# noinspection PyBroadException", "try", ":", "rem", "=", "remaining", "(", ")", "if", "rem", "<=", "0", ":", "break", "await", "eventually", "(", "cf", ",", "retryWait", "=", "retryWait", ",", "timeout", "=", "rem", ",", "acceptableExceptions", "=", "acceptableExceptions", ",", "verbose", "=", "True", ",", "override_timeout_limit", "=", "override_timeout_limit", ")", "except", "Exception", "as", "ex", ":", "if", "acceptableExceptions", "and", "type", "(", "ex", ")", "not", "in", "acceptableExceptions", ":", "raise", "fails", "+=", "1", "logger", ".", "debug", "(", "\"a coro {} with args {} timed out without succeeding; fail count: \"", "\"{}, acceptable: {}\"", ".", "format", "(", "get_func_name", "(", "cf", ")", ",", "get_func_args", "(", "cf", ")", ",", "fails", ",", "acceptableFails", ")", ")", "if", "fails", ">", "acceptableFails", ":", "raise", "if", "rem", "is", "not", "None", "and", "rem", "<=", "0", ":", "fails", "+=", "1", "if", "fails", ">", "acceptableFails", ":", "err", "=", "'All checks could not complete successfully since total timeout '", "'expired {} sec ago'", ".", "format", "(", "-", "1", "*", "rem", "if", "rem", "<", "0", "else", "0", ")", "raise", "EventuallyTimeoutException", "(", "err", ")", "if", "others", ":", "funcNames", ".", "append", "(", "\"and {} others\"", ".", "format", "(", "others", ")", ")", "desc", "=", "\", \"", ".", "join", "(", "funcNames", ")", "logger", ".", "debug", "(", "\"{} succeeded with {:.2f} seconds to spare\"", ".", "format", "(", "desc", ",", "remaining", "(", ")", ")", ")" ]
:param coroFuncs: iterable of no-arg functions :param totalTimeout: :param retryWait: :param acceptableExceptions: :param acceptableFails: how many of the passed in coroutines can ultimately fail and still be ok :return:
[ ":", "param", "coroFuncs", ":", "iterable", "of", "no", "-", "arg", "functions", ":", "param", "totalTimeout", ":", ":", "param", "retryWait", ":", ":", "param", "acceptableExceptions", ":", ":", "param", "acceptableFails", ":", "how", "many", "of", "the", "passed", "in", "coroutines", "can", "ultimately", "fail", "and", "still", "be", "ok", ":", "return", ":" ]
python
train
37.140625
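A hedged usage sketch: functools.partial turns parameterised checks into the no-arg callables the signature requires. The node objects and their assert_connected method are hypothetical stand-ins:

import functools

async def wait_for_cluster(nodes):
    # Each partial becomes one no-arg check function.
    checks = [functools.partial(node.assert_connected) for node in nodes]
    # Allow one node to lag behind without failing the whole wait.
    await eventuallyAll(*checks, totalTimeout=30, retryWait=0.5,
                        acceptableFails=1)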
tomprince/txgithub
txgithub/api.py
https://github.com/tomprince/txgithub/blob/3bd5eebb25db013e2193e6a102a91049f356710d/txgithub/api.py#L203-L233
def editHook(self, repo_user, repo_name, hook_id, name, config, events=None, add_events=None, remove_events=None, active=None): """ PATCH /repos/:owner/:repo/hooks/:id :param hook_id: Id of the hook. :param name: The name of the service that is being called. :param config: A Hash containing key/value pairs to provide settings for this hook. """ post = dict( name=name, config=config, ) if events is not None: post['events'] = events if add_events is not None: post['add_events'] = add_events if remove_events is not None: post['remove_events'] = remove_events if active is not None: post['active'] = active return self.api.makeRequest( ['repos', repo_user, repo_name, 'hooks', str(hook_id)], method='PATCH', post=post, )
[ "def", "editHook", "(", "self", ",", "repo_user", ",", "repo_name", ",", "hook_id", ",", "name", ",", "config", ",", "events", "=", "None", ",", "add_events", "=", "None", ",", "remove_events", "=", "None", ",", "active", "=", "None", ")", ":", "post", "=", "dict", "(", "name", "=", "name", ",", "config", "=", "config", ",", ")", "if", "events", "is", "not", "None", ":", "post", "[", "'events'", "]", "=", "events", "if", "add_events", "is", "not", "None", ":", "post", "[", "'add_events'", "]", "=", "add_events", "if", "remove_events", "is", "not", "None", ":", "post", "[", "'remove_events'", "]", "=", "remove_events", "if", "active", "is", "not", "None", ":", "post", "[", "'active'", "]", "=", "active", "return", "self", ".", "api", ".", "makeRequest", "(", "[", "'repos'", ",", "repo_user", ",", "repo_name", ",", "'hooks'", ",", "str", "(", "hook_id", ")", "]", ",", "method", "=", "'PATCH'", ",", "post", "=", "post", ",", ")" ]
PATCH /repos/:owner/:repo/hooks/:id :param hook_id: Id of the hook. :param name: The name of the service that is being called. :param config: A Hash containing key/value pairs to provide settings for this hook.
[ "PATCH", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "hooks", "/", ":", "id" ]
python
train
31.387097
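A sketch of driving this endpoint from Twisted code. The GithubApi entry point and its repos attribute are assumptions drawn from this module's layout, and the repository, hook id, and token are placeholders:

from txgithub.api import GithubApi   # entry point assumed from this module

api = GithubApi('<oauth2 token>')
d = api.repos.editHook(
    'octocat', 'hello-world', 42, 'web',
    config={'url': 'https://ci.example.com/hook', 'content_type': 'json'},
    events=['push'], active=True)
d.addCallback(print)   # returns a Twisted Deferred, not a plain value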
jeffknupp/sandman
sandman/sandman.py
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/sandman.py#L165-L174
def _collection_html_response(resources, start=0, stop=20): """Return the HTML representation of the collection *resources*. :param list resources: list of :class:`sandman.model.Model`s to render :rtype: :class:`flask.Response` """ return make_response(render_template( 'collection.html', resources=resources[start:stop]))
[ "def", "_collection_html_response", "(", "resources", ",", "start", "=", "0", ",", "stop", "=", "20", ")", ":", "return", "make_response", "(", "render_template", "(", "'collection.html'", ",", "resources", "=", "resources", "[", "start", ":", "stop", "]", ")", ")" ]
Return the HTML representation of the collection *resources*. :param list resources: list of :class:`sandman.model.Model`s to render :rtype: :class:`flask.Response`
[ "Return", "the", "HTML", "representation", "of", "the", "collection", "*", "resources", "*", "." ]
python
train
35.1
emc-openstack/storops
storops/lib/converter.py
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/lib/converter.py#L312-L333
def ipv6_prefix_to_mask(prefix):
    """ ipv6 cidr prefix to net mask
    :param prefix: cidr prefix, range in [0, 128]
    :type prefix: int
    :return: colon separated ipv6 net mask code,
        eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
    :rtype: str
    """
    if prefix > 128 or prefix < 0:
        raise ValueError("invalid cidr prefix for ipv6")
    else:
        mask = ((1 << 128) - 1) ^ ((1 << (128 - prefix)) - 1)
        f = 15  # 0xf or 0b1111
        hex_mask_str = ''
        for i in range(0, 32):
            hex_mask_str = format((mask & f), 'x') + hex_mask_str
            mask = mask >> 4
            if i != 31 and i & 3 == 3:
                hex_mask_str = ':' + hex_mask_str
        return hex_mask_str
[ "def", "ipv6_prefix_to_mask", "(", "prefix", ")", ":", "if", "prefix", ">", "128", "or", "prefix", "<", "0", ":", "raise", "ValueError", "(", "\"invalid cidr prefix for ipv6\"", ")", "else", ":", "mask", "=", "(", "(", "1", "<<", "128", ")", "-", "1", ")", "^", "(", "(", "1", "<<", "(", "128", "-", "prefix", ")", ")", "-", "1", ")", "f", "=", "15", "# 0xf or 0b1111", "hex_mask_str", "=", "''", "for", "i", "in", "range", "(", "0", ",", "32", ")", ":", "hex_mask_str", "=", "format", "(", "(", "mask", "&", "f", ")", ",", "'x'", ")", "+", "hex_mask_str", "mask", "=", "mask", ">>", "4", "if", "i", "!=", "31", "and", "i", "&", "3", "==", "3", ":", "hex_mask_str", "=", "':'", "+", "hex_mask_str", "return", "hex_mask_str" ]
ipv6 cidr prefix to net mask :param prefix: cidr prefix, range in [0, 128] :type prefix: int :return: colon separated ipv6 net mask code, eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000 :rtype: str
[ "ipv6", "cidr", "prefix", "to", "net", "mask" ]
python
train
32.681818
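Worked examples of the nibble loop above: each iteration emits one hex digit from the low end of the mask, and a colon is inserted after every fourth digit:

ipv6_prefix_to_mask(64)   # 'ffff:ffff:ffff:ffff:0000:0000:0000:0000'
ipv6_prefix_to_mask(128)  # 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
ipv6_prefix_to_mask(129)  # raises ValueError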
danielhrisca/asammdf
asammdf/blocks/mdf_v4.py
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L2229-L2276
def configure( self, *, read_fragment_size=None, write_fragment_size=None, use_display_names=None, single_bit_uint_as_bool=None, integer_interpolation=None, ): """ configure MDF parameters Parameters ---------- read_fragment_size : int size hint of split data blocks, default 8MB; if the initial size is smaller, then no data list is used. The actual split size depends on the data groups' records size write_fragment_size : int size hint of split data blocks, default 4MB; if the initial size is smaller, then no data list is used. The actual split size depends on the data groups' records size. Maximum size is 4MB to ensure compatibility with CANape use_display_names : bool search for display name in the Channel XML comment single_bit_uint_as_bool : bool return single bit channels are np.bool arrays integer_interpolation : int interpolation mode for integer channels: * 0 - repeat previous sample * 1 - use linear interpolation """ if read_fragment_size is not None: self._read_fragment_size = int(read_fragment_size) if write_fragment_size: self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20) if use_display_names is not None: self._use_display_names = bool(use_display_names) if single_bit_uint_as_bool is not None: self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool) if integer_interpolation in (0, 1): self._integer_interpolation = int(integer_interpolation)
[ "def", "configure", "(", "self", ",", "*", ",", "read_fragment_size", "=", "None", ",", "write_fragment_size", "=", "None", ",", "use_display_names", "=", "None", ",", "single_bit_uint_as_bool", "=", "None", ",", "integer_interpolation", "=", "None", ",", ")", ":", "if", "read_fragment_size", "is", "not", "None", ":", "self", ".", "_read_fragment_size", "=", "int", "(", "read_fragment_size", ")", "if", "write_fragment_size", ":", "self", ".", "_write_fragment_size", "=", "min", "(", "int", "(", "write_fragment_size", ")", ",", "4", "*", "2", "**", "20", ")", "if", "use_display_names", "is", "not", "None", ":", "self", ".", "_use_display_names", "=", "bool", "(", "use_display_names", ")", "if", "single_bit_uint_as_bool", "is", "not", "None", ":", "self", ".", "_single_bit_uint_as_bool", "=", "bool", "(", "single_bit_uint_as_bool", ")", "if", "integer_interpolation", "in", "(", "0", ",", "1", ")", ":", "self", ".", "_integer_interpolation", "=", "int", "(", "integer_interpolation", ")" ]
configure MDF parameters

Parameters
----------
read_fragment_size : int
    size hint of split data blocks, default 8MB; if the initial size is
    smaller, then no data list is used. The actual split size depends on
    the data groups' records size
write_fragment_size : int
    size hint of split data blocks, default 4MB; if the initial size is
    smaller, then no data list is used. The actual split size depends on
    the data groups' records size. Maximum size is 4MB to ensure
    compatibility with CANape
use_display_names : bool
    search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
    return single bit channels as np.bool arrays
integer_interpolation : int
    interpolation mode for integer channels:

        * 0 - repeat previous sample
        * 1 - use linear interpolation
[ "configure", "MDF", "parameters" ]
python
train
36.3125
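A usage sketch against the public MDF class; the file name and the chosen sizes are illustrative:

from asammdf import MDF

mdf = MDF('measurement.mf4')             # placeholder file name
mdf.configure(read_fragment_size=16 * 2 ** 20,
              use_display_names=True,
              integer_interpolation=1)   # linear interpolation for integers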
coleifer/walrus
walrus/database.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/database.py#L150-L163
def run_script(self, script_name, keys=None, args=None): """ Execute a walrus script with the given arguments. :param script_name: The base name of the script to execute. :param list keys: Keys referenced by the script. :param list args: Arguments passed in to the script. :returns: Return value of script. .. note:: Redis scripts require two parameters, ``keys`` and ``args``, which are referenced in lua as ``KEYS`` and ``ARGV``. """ return self._scripts[script_name](keys, args)
[ "def", "run_script", "(", "self", ",", "script_name", ",", "keys", "=", "None", ",", "args", "=", "None", ")", ":", "return", "self", ".", "_scripts", "[", "script_name", "]", "(", "keys", ",", "args", ")" ]
Execute a walrus script with the given arguments. :param script_name: The base name of the script to execute. :param list keys: Keys referenced by the script. :param list args: Arguments passed in to the script. :returns: Return value of script. .. note:: Redis scripts require two parameters, ``keys`` and ``args``, which are referenced in lua as ``KEYS`` and ``ARGV``.
[ "Execute", "a", "walrus", "script", "with", "the", "given", "arguments", "." ]
python
train
40.428571
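A usage sketch; 'my_script' stands in for whichever Lua scripts the Database instance registered into its _scripts table, and a local Redis on the default port is assumed:

from walrus import Database

db = Database()   # connects to localhost Redis by default
# KEYS and ARGV in the Lua script receive these two lists.
result = db.run_script('my_script', keys=['queue:1'], args=['payload'])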
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5541-L5546
def removeNotification(self, notificationId):
    """Destroy a notification, hiding it first if it is currently shown to the user."""

    fn = self.function_table.removeNotification
    result = fn(notificationId)
    return result
[ "def", "removeNotification", "(", "self", ",", "notificationId", ")", ":", "fn", "=", "self", ".", "function_table", ".", "removeNotification", "result", "=", "fn", "(", "notificationId", ")", "return", "result" ]
Destroy a notification, hiding it first if it is currently shown to the user.
[ "Destroy", "a", "notification", "hiding", "it", "first", "if", "it", "is", "currently", "shown", "to", "the", "user", "." ]
python
train
40
stevelittlefish/littlefish
littlefish/timetool.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/timetool.py#L290-L304
def unix_time(dt=None, as_int=False): """Generate a unix style timestamp (in seconds)""" if dt is None: dt = datetime.datetime.utcnow() if type(dt) is datetime.date: dt = date_to_datetime(dt) epoch = datetime.datetime.utcfromtimestamp(0) delta = dt - epoch if as_int: return int(delta.total_seconds()) return delta.total_seconds()
[ "def", "unix_time", "(", "dt", "=", "None", ",", "as_int", "=", "False", ")", ":", "if", "dt", "is", "None", ":", "dt", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "if", "type", "(", "dt", ")", "is", "datetime", ".", "date", ":", "dt", "=", "date_to_datetime", "(", "dt", ")", "epoch", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "0", ")", "delta", "=", "dt", "-", "epoch", "if", "as_int", ":", "return", "int", "(", "delta", ".", "total_seconds", "(", ")", ")", "return", "delta", ".", "total_seconds", "(", ")" ]
Generate a unix style timestamp (in seconds)
[ "Generate", "a", "unix", "style", "timestamp", "(", "in", "seconds", ")" ]
python
test
25.066667
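Two worked calls; the date branch assumes date_to_datetime maps a date to midnight UTC, as the helper's name suggests:

import datetime

unix_time(datetime.datetime(1970, 1, 2))           # 86400.0 (one day of seconds)
unix_time(datetime.date(1970, 1, 2), as_int=True)  # 86400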
ergo/ziggurat_foundations
ziggurat_foundations/models/services/user.py
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/services/user.py#L367-L382
def user_names_like(cls, user_name, db_session=None): """ fetch users with similar names using LIKE clause :param user_name: :param db_session: :return: """ db_session = get_db_session(db_session) query = db_session.query(cls.model) query = query.filter( sa.func.lower(cls.model.user_name).like((user_name or "").lower()) ) query = query.order_by(cls.model.user_name) # q = q.options(sa.orm.eagerload('groups')) return query
[ "def", "user_names_like", "(", "cls", ",", "user_name", ",", "db_session", "=", "None", ")", ":", "db_session", "=", "get_db_session", "(", "db_session", ")", "query", "=", "db_session", ".", "query", "(", "cls", ".", "model", ")", "query", "=", "query", ".", "filter", "(", "sa", ".", "func", ".", "lower", "(", "cls", ".", "model", ".", "user_name", ")", ".", "like", "(", "(", "user_name", "or", "\"\"", ")", ".", "lower", "(", ")", ")", ")", "query", "=", "query", ".", "order_by", "(", "cls", ".", "model", ".", "user_name", ")", "# q = q.options(sa.orm.eagerload('groups'))", "return", "query" ]
fetch users with similar names using LIKE clause :param user_name: :param db_session: :return:
[ "fetch", "users", "with", "similar", "names", "using", "LIKE", "clause" ]
python
train
32.875
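A usage sketch: '%' is the SQL LIKE wildcard, and matching is case-insensitive thanks to the lower() calls above. The service import path is taken from this entry's own path field; session stands in for your SQLAlchemy session:

from ziggurat_foundations.models.services.user import UserService

query = UserService.user_names_like('adm%', db_session=session)
names = [user.user_name for user in query]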
Nachtfeuer/pipeline
spline/tools/condition.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/condition.py#L144-L167
def match_tokens(ast_tokens, ast_types):
    """
    Verify that each token in order does match the expected types.

    The list provided by `get_tokens` does have two more elements
    at the beginning of the list which should be always the same
    for a condition (Module and Expr). Those are automatically
    added first to the final list of expected types so you
    don't have to specify it yourself each time.

    >>> tokens = Condition.get_tokens('2 == 3')
    >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num])
    True

    Args:
        ast_tokens (list): list of AST tokens parsed previously.
        ast_types (list): list of expected AST types.

    Returns:
        bool: True when all tokens match the expected types
    """
    ast_final_types = [ast.Module, ast.Expr] + ast_types
    return all(isinstance(ast_token, ast_type)
               for ast_token, ast_type in zip(ast_tokens, ast_final_types))
[ "def", "match_tokens", "(", "ast_tokens", ",", "ast_types", ")", ":", "ast_final_types", "=", "[", "ast", ".", "Module", ",", "ast", ".", "Expr", "]", "+", "ast_types", "return", "all", "(", "isinstance", "(", "ast_token", ",", "ast_type", ")", "for", "ast_token", ",", "ast_type", "in", "zip", "(", "ast_tokens", ",", "ast_final_types", ")", ")" ]
Verify that each token in order does match the expected types. The list provided by `get_tokens` does have two more elements at the beginning of the list which should be always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_tokens (list): list of AST tokens parsed previously. ast_types (list): list of expected AST types. Returns: bool: True when all tokens match the expected types
[ "Verify", "that", "each", "token", "in", "order", "does", "match", "the", "expected", "types", "." ]
python
train
41.625
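The doctest above implies that get_tokens walks the tree breadth-first, so the same check can be reproduced with the standard library alone (ast.Num matches numeric literals on the Python versions this project targets):

import ast

# ast.walk is breadth-first: Module, Expr, Compare, Num, Eq, Num.
tokens = list(ast.walk(ast.parse('2 == 3')))
expected = [ast.Module, ast.Expr] + [ast.Compare, ast.Num, ast.Eq, ast.Num]
matched = all(isinstance(tok, typ) for tok, typ in zip(tokens, expected))  # True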
totalgood/nlpia
src/nlpia/loaders.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1027-L1113
def get_data(name='sms-spam', nrows=None, limit=None):
    """ Load data from a json, csv, or txt file if it exists in the data dir.

    References:
      [cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp)
      [cities](http://download.geonames.org/export/dump/cities.zip)
      [cities_us](http://download.geonames.org/export/dump/cities_us.zip)

    >>> from nlpia.data.loaders import get_data
    >>> words = get_data('words_ubuntu_us')
    >>> len(words)
    99171
    >>> list(words[:8])
    ['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"]
    >>> get_data('ubuntu_dialog_test').iloc[0]
    Context      i think we could import the old comments via r...
    Utterance    basically each xfree86 upload will NOT force u...
    Name: 0, dtype: object
    >>> get_data('imdb_test').info()
    <class 'pandas.core.frame.DataFrame'>
    MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9)
    Data columns (total 3 columns):
    url       20 non-null object
    rating    20 non-null int64
    text      20 non-null object
    dtypes: int64(1), object(2)
    memory usage: 809.0+ bytes
    """
    nrows = nrows or limit
    if name in BIG_URLS:
        logger.info('Downloading {}'.format(name))
        filepaths = download_unzip(name, normalize_filenames=True)
        logger.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths))
        filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name]
        logger.debug('nlpia.loaders.get_data.filepath=' + str(filepath))
        filepathlow = filepath.lower()

        if len(BIG_URLS[name]) >= 4:
            kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {}
            return BIG_URLS[name][3](filepath, **kwargs)
        if filepathlow.endswith('.w2v.txt'):
            try:
                return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows)
            except (TypeError, UnicodeError):
                pass
        if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'):
            try:
                return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows)
            except (TypeError, UnicodeError):
                pass
        if filepathlow.endswith('.gz'):
            try:
                filepath = ensure_open(filepath)
            except:  # noqa
                pass
        if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow):
            return read_json(filepath)
        if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'):
            try:
                return pd.read_table(filepath)
            except:  # noqa
                pass
        if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'):
            try:
                return read_csv(filepath)
            except:  # noqa
                pass
        if filepathlow.endswith('.txt'):
            try:
                return read_txt(filepath)
            except (TypeError, UnicodeError):
                pass
        return filepaths[name]
    elif name in DATASET_NAME2FILENAME:
        return read_named_csv(name, nrows=nrows)
    elif name in DATA_NAMES:
        return read_named_csv(DATA_NAMES[name], nrows=nrows)
    elif os.path.isfile(name):
        return read_named_csv(name, nrows=nrows)
    elif os.path.isfile(os.path.join(DATA_PATH, name)):
        return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows)

    msg = 'Unable to find dataset "{}" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format(
        name, DATA_PATH, BIGDATA_PATH)
    msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES))
    logger.error(msg)
    raise IOError(msg)
[ "def", "get_data", "(", "name", "=", "'sms-spam'", ",", "nrows", "=", "None", ",", "limit", "=", "None", ")", ":", "nrows", "=", "nrows", "or", "limit", "if", "name", "in", "BIG_URLS", ":", "logger", ".", "info", "(", "'Downloading {}'", ".", "format", "(", "name", ")", ")", "filepaths", "=", "download_unzip", "(", "name", ",", "normalize_filenames", "=", "True", ")", "logger", ".", "debug", "(", "'nlpia.loaders.get_data.filepaths='", "+", "str", "(", "filepaths", ")", ")", "filepath", "=", "filepaths", "[", "name", "]", "[", "0", "]", "if", "isinstance", "(", "filepaths", "[", "name", "]", ",", "(", "list", ",", "tuple", ")", ")", "else", "filepaths", "[", "name", "]", "logger", ".", "debug", "(", "'nlpia.loaders.get_data.filepath='", "+", "str", "(", "filepath", ")", ")", "filepathlow", "=", "filepath", ".", "lower", "(", ")", "if", "len", "(", "BIG_URLS", "[", "name", "]", ")", ">=", "4", ":", "kwargs", "=", "BIG_URLS", "[", "name", "]", "[", "4", "]", "if", "len", "(", "BIG_URLS", "[", "name", "]", ")", ">=", "5", "else", "{", "}", "return", "BIG_URLS", "[", "name", "]", "[", "3", "]", "(", "filepath", ",", "*", "*", "kwargs", ")", "if", "filepathlow", ".", "endswith", "(", "'.w2v.txt'", ")", ":", "try", ":", "return", "KeyedVectors", ".", "load_word2vec_format", "(", "filepath", ",", "binary", "=", "False", ",", "limit", "=", "nrows", ")", "except", "(", "TypeError", ",", "UnicodeError", ")", ":", "pass", "if", "filepathlow", ".", "endswith", "(", "'.w2v.bin'", ")", "or", "filepathlow", ".", "endswith", "(", "'.bin.gz'", ")", "or", "filepathlow", ".", "endswith", "(", "'.w2v.bin.gz'", ")", ":", "try", ":", "return", "KeyedVectors", ".", "load_word2vec_format", "(", "filepath", ",", "binary", "=", "True", ",", "limit", "=", "nrows", ")", "except", "(", "TypeError", ",", "UnicodeError", ")", ":", "pass", "if", "filepathlow", ".", "endswith", "(", "'.gz'", ")", ":", "try", ":", "filepath", "=", "ensure_open", "(", "filepath", ")", "except", ":", "# noqa", "pass", "if", "re", ".", "match", "(", "r'.json([.][a-z]{0,3}){0,2}'", ",", "filepathlow", ")", ":", "return", "read_json", "(", "filepath", ")", "if", "filepathlow", ".", "endswith", "(", "'.tsv.gz'", ")", "or", "filepathlow", ".", "endswith", "(", "'.tsv'", ")", ":", "try", ":", "return", "pd", ".", "read_table", "(", "filepath", ")", "except", ":", "# noqa", "pass", "if", "filepathlow", ".", "endswith", "(", "'.csv.gz'", ")", "or", "filepathlow", ".", "endswith", "(", "'.csv'", ")", ":", "try", ":", "return", "read_csv", "(", "filepath", ")", "except", ":", "# noqa", "pass", "if", "filepathlow", ".", "endswith", "(", "'.txt'", ")", ":", "try", ":", "return", "read_txt", "(", "filepath", ")", "except", "(", "TypeError", ",", "UnicodeError", ")", ":", "pass", "return", "filepaths", "[", "name", "]", "elif", "name", "in", "DATASET_NAME2FILENAME", ":", "return", "read_named_csv", "(", "name", ",", "nrows", "=", "nrows", ")", "elif", "name", "in", "DATA_NAMES", ":", "return", "read_named_csv", "(", "DATA_NAMES", "[", "name", "]", ",", "nrows", "=", "nrows", ")", "elif", "os", ".", "path", ".", "isfile", "(", "name", ")", ":", "return", "read_named_csv", "(", "name", ",", "nrows", "=", "nrows", ")", "elif", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "name", ")", ")", ":", "return", "read_named_csv", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "name", ")", ",", "nrows", "=", "nrows", ")", "msg", "=", "'Unable to find dataset \"{}\"\" in {} or {} (*.csv.gz, *.csv, *.json, 
*.zip, or *.txt)\\n'", ".", "format", "(", "name", ",", "DATA_PATH", ",", "BIGDATA_PATH", ")", "msg", "+=", "'Available dataset names include:\\n{}'", ".", "format", "(", "'\\n'", ".", "join", "(", "DATASET_NAMES", ")", ")", "logger", ".", "error", "(", "msg", ")", "raise", "IOError", "(", "msg", ")" ]
Load data from a json, csv, or txt file if it exists in the data dir. References: [cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp) [cities](http://download.geonames.org/export/dump/cities.zip) [cities_us](http://download.geonames.org/export/dump/cities_us.zip) >>> from nlpia.data.loaders import get_data >>> words = get_data('words_ubuntu_us') >>> len(words) 99171 >>> list(words[:8]) ['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"] >>> get_data('ubuntu_dialog_test').iloc[0] Context i think we could import the old comments via r... Utterance basically each xfree86 upload will NOT force u... Name: 0, dtype: object >>> get_data('imdb_test').info() <class 'pandas.core.frame.DataFrame'> MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9) Data columns (total 3 columns): url 20 non-null object rating 20 non-null int64 text 20 non-null object dtypes: int64(1), object(2) memory usage: 809.0+ bytes
[ "Load", "data", "from", "a", "json", "csv", "or", "txt", "file", "if", "it", "exists", "in", "the", "data", "dir", "." ]
python
train
42.517241
mitsei/dlkit
dlkit/json_/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/managers.py#L1481-L1498
def get_objective_lookup_session(self, proxy): """Gets the ``OsidSession`` associated with the objective lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveLookupSession) - an ``ObjectiveLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_lookup()`` is ``true``.* """ if not self.supports_objective_lookup(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ObjectiveLookupSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_objective_lookup_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_objective_lookup", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ObjectiveLookupSession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the ``OsidSession`` associated with the objective lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveLookupSession) - an ``ObjectiveLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_lookup()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "objective", "lookup", "service", "." ]
python
train
45.333333
nccgroup/Scout2
AWSScout2/output/console.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/output/console.py#L71-L95
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup = False): """ Format and print the output of ListAll :param lines: :param resources: :param aws_config: :param template: :param arguments: :param nodup: :return: """ for line in lines: output = [] for resource in resources: current_path = resource.split('.') outline = line[1] for key in line[2]: outline = outline.replace('_KEY_('+key+')', get_value_at(aws_config['services'], current_path, key, True)) output.append(outline) output = '\n'.join(line for line in sorted(set(output))) template = template.replace(line[0], output) for (i, argument) in enumerate(arguments): template = template.replace('_ARG_%d_' % i, argument) return template
[ "def", "generate_listall_output", "(", "lines", ",", "resources", ",", "aws_config", ",", "template", ",", "arguments", ",", "nodup", "=", "False", ")", ":", "for", "line", "in", "lines", ":", "output", "=", "[", "]", "for", "resource", "in", "resources", ":", "current_path", "=", "resource", ".", "split", "(", "'.'", ")", "outline", "=", "line", "[", "1", "]", "for", "key", "in", "line", "[", "2", "]", ":", "outline", "=", "outline", ".", "replace", "(", "'_KEY_('", "+", "key", "+", "')'", ",", "get_value_at", "(", "aws_config", "[", "'services'", "]", ",", "current_path", ",", "key", ",", "True", ")", ")", "output", ".", "append", "(", "outline", ")", "output", "=", "'\\n'", ".", "join", "(", "line", "for", "line", "in", "sorted", "(", "set", "(", "output", ")", ")", ")", "template", "=", "template", ".", "replace", "(", "line", "[", "0", "]", ",", "output", ")", "for", "(", "i", ",", "argument", ")", "in", "enumerate", "(", "arguments", ")", ":", "template", "=", "template", ".", "replace", "(", "'_ARG_%d_'", "%", "i", ",", "argument", ")", "return", "template" ]
Format and print the output of ListAll :param lines: :param resources: :param aws_config: :param template: :param arguments: :param nodup: :return:
[ "Format", "and", "print", "the", "output", "of", "ListAll" ]
python
train
34.36
openfisca/openfisca-france-indirect-taxation
openfisca_france_indirect_taxation/examples/calage_bdf_cn_bis.py
https://github.com/openfisca/openfisca-france-indirect-taxation/blob/b4bc7da90a1126ebfc3af2c3ec61de5a2b70bb2e/openfisca_france_indirect_taxation/examples/calage_bdf_cn_bis.py#L112-L125
def get_inflators(target_year):
    '''
    Function that computes the calibration ratios (bdf to cn, for the data year)
    and the ageing ratios from the national accounts aggregates and the bdf
    consumption aggregates.
    '''
    data_year = find_nearest_inferior(data_years, target_year)
    inflators_bdf_to_cn = get_inflators_bdf_to_cn(data_year)
    inflators_cn_to_cn = get_inflators_cn_to_cn(target_year)
    ratio_by_variable = dict()
    for key in inflators_cn_to_cn.keys():
        ratio_by_variable[key] = inflators_bdf_to_cn[key] * inflators_cn_to_cn[key]
    return ratio_by_variable
[ "def", "get_inflators", "(", "target_year", ")", ":", "data_year", "=", "find_nearest_inferior", "(", "data_years", ",", "target_year", ")", "inflators_bdf_to_cn", "=", "get_inflators_bdf_to_cn", "(", "data_year", ")", "inflators_cn_to_cn", "=", "get_inflators_cn_to_cn", "(", "target_year", ")", "ratio_by_variable", "=", "dict", "(", ")", "for", "key", "in", "inflators_cn_to_cn", ".", "keys", "(", ")", ":", "ratio_by_variable", "[", "key", "]", "=", "inflators_bdf_to_cn", "[", "key", "]", "*", "inflators_cn_to_cn", "[", "key", "]", "return", "ratio_by_variable" ]
Function that computes the calibration ratios (bdf to cn, for the data year) and the ageing ratios from the national accounts aggregates and the bdf consumption aggregates.
[ "Function", "that", "computes", "the", "calibration", "ratios", "(", "bdf", "to", "cn", "for", "the", "data", "year", ")", "and", "the", "ageing", "ratios", "from", "the", "national", "accounts", "aggregates", "and", "the", "bdf", "consumption", "aggregates", "." ]
python
train
42.642857
quantmind/pulsar
pulsar/apps/__init__.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/__init__.py#L633-L665
def new_app(self, App, prefix=None, callable=None, **params): """Invoke this method in the :meth:`build` method as many times as the number of :class:`Application` required by this :class:`MultiApp`. :param App: an :class:`Application` class. :param prefix: The prefix to use for the application, the prefix is appended to the application :ref:`config parameters <settings>` and to the application name. Each call to this method must use a different value of for this parameter. It can be ``None``. :param callable: optional callable (function of object) used during initialisation of *App* (the :class:`Application.callable`). :param params: additional key-valued parameters used when creating an instance of *App*. :return: a tuple used by the :meth:`apps` method. """ params.update(self.cfg.params) params.pop('name', None) # remove the name prefix = prefix or '' if not prefix and '' in self._apps: prefix = App.name or App.__name__.lower() if not prefix: name = self.name cfg = App.create_config(params, name=name) else: name = '%s_%s' % (prefix, self.name) cfg = App.create_config(params, prefix=prefix, name=name) # Add the config entry to the multi app config if not available for k in cfg.settings: if k not in self.cfg.settings: self.cfg.settings[k] = cfg.settings[k] return new_app(prefix, (App, name, callable, cfg))
[ "def", "new_app", "(", "self", ",", "App", ",", "prefix", "=", "None", ",", "callable", "=", "None", ",", "*", "*", "params", ")", ":", "params", ".", "update", "(", "self", ".", "cfg", ".", "params", ")", "params", ".", "pop", "(", "'name'", ",", "None", ")", "# remove the name", "prefix", "=", "prefix", "or", "''", "if", "not", "prefix", "and", "''", "in", "self", ".", "_apps", ":", "prefix", "=", "App", ".", "name", "or", "App", ".", "__name__", ".", "lower", "(", ")", "if", "not", "prefix", ":", "name", "=", "self", ".", "name", "cfg", "=", "App", ".", "create_config", "(", "params", ",", "name", "=", "name", ")", "else", ":", "name", "=", "'%s_%s'", "%", "(", "prefix", ",", "self", ".", "name", ")", "cfg", "=", "App", ".", "create_config", "(", "params", ",", "prefix", "=", "prefix", ",", "name", "=", "name", ")", "# Add the config entry to the multi app config if not available", "for", "k", "in", "cfg", ".", "settings", ":", "if", "k", "not", "in", "self", ".", "cfg", ".", "settings", ":", "self", ".", "cfg", ".", "settings", "[", "k", "]", "=", "cfg", ".", "settings", "[", "k", "]", "return", "new_app", "(", "prefix", ",", "(", "App", ",", "name", ",", "callable", ",", "cfg", ")", ")" ]
Invoke this method in the :meth:`build` method as many times as the number of :class:`Application` required by this :class:`MultiApp`. :param App: an :class:`Application` class. :param prefix: The prefix to use for the application, the prefix is appended to the application :ref:`config parameters <settings>` and to the application name. Each call to this method must use a different value for this parameter. It can be ``None``. :param callable: optional callable (function or object) used during initialisation of *App* (the :class:`Application.callable`). :param params: additional key-valued parameters used when creating an instance of *App*. :return: a tuple used by the :meth:`apps` method.
[ "Invoke", "this", "method", "in", "the", ":", "meth", ":", "build", "method", "as", "many", "times", "as", "the", "number", "of", ":", "class", ":", "Application", "required", "by", "this", ":", "class", ":", "MultiApp", "." ]
python
train
48.606061
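A hedged sketch of a build() method yielding two applications. WSGIServer is pulsar's bundled WSGI app, while the two handler callables are placeholders, not values from the source:

from pulsar.apps import MultiApp, wsgi

class Server(MultiApp):

    def build(self):
        # Prefixes must differ between calls; the first app keeps no prefix.
        yield self.new_app(wsgi.WSGIServer, callable=main_site)    # placeholder
        yield self.new_app(wsgi.WSGIServer, 'admin', callable=admin_site)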
DataBiosphere/dsub
dsub/commands/dstat.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/commands/dstat.py#L192-L197
def string_presenter(self, dumper, data): """Presenter to force yaml.dump to use multi-line string style.""" if '\n' in data: return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') else: return dumper.represent_scalar('tag:yaml.org,2002:str', data)
[ "def", "string_presenter", "(", "self", ",", "dumper", ",", "data", ")", ":", "if", "'\\n'", "in", "data", ":", "return", "dumper", ".", "represent_scalar", "(", "'tag:yaml.org,2002:str'", ",", "data", ",", "style", "=", "'|'", ")", "else", ":", "return", "dumper", ".", "represent_scalar", "(", "'tag:yaml.org,2002:str'", ",", "data", ")" ]
Presenter to force yaml.dump to use multi-line string style.
[ "Presenter", "to", "force", "yaml", ".", "dump", "to", "use", "multi", "-", "line", "string", "style", "." ]
python
valid
47.5
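The presenter only takes effect once it is registered with PyYAML; a standalone copy of the same logic shows the round trip:

import yaml

def string_presenter(dumper, data):
    # Multi-line strings get literal block style; everything else stays default.
    if '\n' in data:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)

yaml.add_representer(str, string_presenter)
print(yaml.dump({'log': 'line one\nline two'}))
# log: |-
#   line one
#   line two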