Column                        Dtype     Values / lengths
Unnamed: 0                    int64     0 - 10k
repository_name               string    lengths 7 - 54
func_path_in_repository       string    lengths 5 - 223
func_name                     string    lengths 1 - 134
whole_func_string             string    lengths 100 - 30.3k
language                      string    1 distinct value
func_code_string              string    lengths 100 - 30.3k
func_code_tokens              string    lengths 138 - 33.2k
func_documentation_string     string    lengths 1 - 15k
func_documentation_tokens     string    lengths 5 - 5.14k
split_name                    string    1 distinct value
func_code_url                 string    lengths 91 - 315
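The columns above describe a corpus of Python functions paired with their docstrings (roughly 10k rows, one function per row). A minimal sketch of loading and inspecting such an export with pandas, assuming a hypothetical CSV file name since no file path appears in this dump:

import pandas as pd

# Hypothetical export name; the dump above does not give an actual file path.
df = pd.read_csv("code_docstring_pairs.csv")

# Each row pairs a function's source with its documentation string.
for _, row in df.head(3).iterrows():
    print(row["repository_name"], row["func_name"])
    print(row["func_documentation_string"][:80])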
1,500
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_items_of_reminder
def get_all_items_of_reminder(self, reminder_id):
    """
    Get all items of reminder
    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and you will get nothing

    :param reminder_id: the reminder id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_items_of_reminder_per_page,
        resource=REMINDER_ITEMS,
        **{'reminder_id': reminder_id}
    )
python
['def', 'get_all_items_of_reminder', '(', 'self', ',', 'reminder_id', ')', ':', 'return', 'self', '.', '_iterate_through_pages', '(', 'get_function', '=', 'self', '.', 'get_items_of_reminder_per_page', ',', 'resource', '=', 'REMINDER_ITEMS', ',', '*', '*', '{', "'reminder_id'", ':', 'reminder_id', '}', ')']
Get all items of reminder This will iterate over all pages until it gets all elements. So if the rate limit is exceeded it will throw an Exception and you will get nothing :param reminder_id: the reminder id :return: list
['Get', 'all', 'items', 'of', 'reminder', 'This', 'will', 'iterate', 'over', 'all', 'pages', 'until', 'it', 'gets', 'all', 'elements', '.', 'So', 'if', 'the', 'rate', 'limit', 'exceeded', 'it', 'will', 'throw', 'an', 'Exception', 'and', 'you', 'will', 'get', 'nothing']
train
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3368-L3381
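The docstring above notes that the call pages through the Billomat API until every element has been fetched and raises if the rate limit is exceeded. A usage sketch, assuming an already-configured client; the constructor arguments shown are placeholders rather than the library's documented signature:

from billomapy import Billomapy

# Placeholder credentials; see the billomapy project README for the real constructor.
client = Billomapy('<billomat_id>', '<api_key>', '<app_id>', '<app_secret>')

# Collects the reminder's items from every page in a single call.
items = client.get_all_items_of_reminder(reminder_id=12345)
print(len(items))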
1,501
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
KafNafParser.remove_properties
def remove_properties(self):
    """
    Removes the property layer (if exists) of the object (in memory)
    """
    if self.features_layer is not None:
        self.features_layer.remove_properties()
    if self.header is not None:
        self.header.remove_lp('features')
python
['def', 'remove_properties', '(', 'self', ')', ':', 'if', 'self', '.', 'features_layer', 'is', 'not', 'None', ':', 'self', '.', 'features_layer', '.', 'remove_properties', '(', ')', 'if', 'self', '.', 'header', 'is', 'not', 'None', ':', 'self', '.', 'header', '.', 'remove_lp', '(', "'features'", ')']
Removes the property layer (if exists) of the object (in memory)
['Removes', 'the', 'property', 'layer', '(', 'if', 'exists', ')', 'of', 'the', 'object', '(', 'in', 'memory', ')']
train
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L828-L836
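A sketch of how this method might be used on a parsed KAF/NAF document; the input file name is hypothetical and the dump() call is assumed from the parser's usual write-out helper:

from KafNafParserPy import KafNafParser

# Hypothetical NAF file containing a features/properties layer.
naf = KafNafParser('example.naf')

# Drops the property layer and its linguistic-processor entry from the header.
naf.remove_properties()
naf.dump()  # write the modified document back out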
1,502
ninuxorg/nodeshot
nodeshot/interop/oldimporter/management/commands/import_old_nodeshot.py
Command.import_users
def import_users(self):
    """ save users to local DB """
    self.message('saving users into local DB')
    saved_users = self.saved_admins

    # loop over all extracted unique email addresses
    for email in self.email_set:
        owner = self.users_dict[email].get('owner')

        # if owner is not specified, build username from email
        if owner.strip() == '':
            owner, domain = email.split('@')
            # replace any points with a space
            owner = owner.replace('.', ' ')

        # if owner has a space, assume he specified first and last name
        if ' ' in owner:
            owner_parts = owner.split(' ')
            first_name = owner_parts[0]
            last_name = owner_parts[1]
        else:
            first_name = owner
            last_name = ''

        # username must be slugified otherwise won't get into the DB
        username = slugify(owner)

        # check if user exists first
        try:
            # try looking by email
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # otherwise init new
            user = User()
            user.username = username
            # generate new password only for new users
            user.password = self.generate_random_password()
            user.is_active = True

        # we'll create one user for each unique email address we've got
        user.first_name = first_name.capitalize()
        user.last_name = last_name.capitalize()
        user.email = email

        # extract date joined from old nodes
        # find the oldest node of this user
        oldest_node = OldNode.objects.filter(email=email).order_by('added')[0]
        user.date_joined = oldest_node.added

        # be sure username is unique
        counter = 1
        original_username = username
        while True:
            # do this check only if user is new
            if not user.pk and User.objects.filter(username=user.username).count() > 0:
                counter += 1
                user.username = '%s%d' % (original_username, counter)
            else:
                break

        try:
            # validate data and save
            user.full_clean()
            user.save(sync_emailaddress=False)
        except Exception:
            # if user already exists use that instance
            if(User.objects.filter(email=email).count() == 1):
                user = User.objects.get(email=email)
            # otherwise report error
            else:
                tb = traceback.format_exc()
                self.message('Could not save user %s, got exception:\n\n%s' % (user.username, tb))
                continue

        # if we got a user to add
        if user:
            # store id
            self.users_dict[email]['id'] = user.id
            # append to saved users
            saved_users.append(user)
            self.verbose('Saved user %s (%s) with email <%s>' % (user.username, user.get_full_name(), user.email))

            # mark email address as confirmed if feature is enabled
            if EMAIL_CONFIRMATION and EmailAddress.objects.filter(email=user.email).count() == 0:
                try:
                    email_address = EmailAddress(user=user, email=user.email, verified=True, primary=True)
                    email_address.full_clean()
                    email_address.save()
                except Exception:
                    tb = traceback.format_exc()
                    self.message('Could not save email address for user %s, got exception:\n\n%s' % (user.username, tb))

    self.message('saved %d users into local DB' % len(saved_users))
    self.saved_users = saved_users
python
['def', 'import_users', '(', 'self', ')', ':', 'self', '.', 'message', '(', "'saving users into local DB'", ')', 'saved_users', '=', 'self', '.', 'saved_admins', '# loop over all extracted unique email addresses', 'for', 'email', 'in', 'self', '.', 'email_set', ':', 'owner', '=', 'self', '.', 'users_dict', '[', 'email', ']', '.', 'get', '(', "'owner'", ')', '# if owner is not specified, build username from email', 'if', 'owner', '.', 'strip', '(', ')', '==', "''", ':', 'owner', ',', 'domain', '=', 'email', '.', 'split', '(', "'@'", ')', '# replace any points with a space', 'owner', '=', 'owner', '.', 'replace', '(', "'.'", ',', "' '", ')', '# if owner has a space, assume he specified first and last name', 'if', "' '", 'in', 'owner', ':', 'owner_parts', '=', 'owner', '.', 'split', '(', "' '", ')', 'first_name', '=', 'owner_parts', '[', '0', ']', 'last_name', '=', 'owner_parts', '[', '1', ']', 'else', ':', 'first_name', '=', 'owner', 'last_name', '=', "''", "# username must be slugified otherwise won't get into the DB", 'username', '=', 'slugify', '(', 'owner', ')', '# check if user exists first', 'try', ':', '# try looking by email', 'user', '=', 'User', '.', 'objects', '.', 'get', '(', 'email', '=', 'email', ')', 'except', 'User', '.', 'DoesNotExist', ':', '# otherwise init new', 'user', '=', 'User', '(', ')', 'user', '.', 'username', '=', 'username', '# generate new password only for new users', 'user', '.', 'password', '=', 'self', '.', 'generate_random_password', '(', ')', 'user', '.', 'is_active', '=', 'True', "# we'll create one user for each unique email address we've got", 'user', '.', 'first_name', '=', 'first_name', '.', 'capitalize', '(', ')', 'user', '.', 'last_name', '=', 'last_name', '.', 'capitalize', '(', ')', 'user', '.', 'email', '=', 'email', '# extract date joined from old nodes', '# find the oldest node of this user', 'oldest_node', '=', 'OldNode', '.', 'objects', '.', 'filter', '(', 'email', '=', 'email', ')', '.', 'order_by', '(', "'added'", ')', '[', '0', ']', 'user', '.', 'date_joined', '=', 'oldest_node', '.', 'added', '# be sure username is unique', 'counter', '=', '1', 'original_username', '=', 'username', 'while', 'True', ':', '# do this check only if user is new', 'if', 'not', 'user', '.', 'pk', 'and', 'User', '.', 'objects', '.', 'filter', '(', 'username', '=', 'user', '.', 'username', ')', '.', 'count', '(', ')', '>', '0', ':', 'counter', '+=', '1', 'user', '.', 'username', '=', "'%s%d'", '%', '(', 'original_username', ',', 'counter', ')', 'else', ':', 'break', 'try', ':', '# validate data and save', 'user', '.', 'full_clean', '(', ')', 'user', '.', 'save', '(', 'sync_emailaddress', '=', 'False', ')', 'except', 'Exception', ':', '# if user already exists use that instance', 'if', '(', 'User', '.', 'objects', '.', 'filter', '(', 'email', '=', 'email', ')', '.', 'count', '(', ')', '==', '1', ')', ':', 'user', '=', 'User', '.', 'objects', '.', 'get', '(', 'email', '=', 'email', ')', '# otherwise report error', 'else', ':', 'tb', '=', 'traceback', '.', 'format_exc', '(', ')', 'self', '.', 'message', '(', "'Could not save user %s, got exception:\\n\\n%s'", '%', '(', 'user', '.', 'username', ',', 'tb', ')', ')', 'continue', '# if we got a user to add', 'if', 'user', ':', '# store id', 'self', '.', 'users_dict', '[', 'email', ']', '[', "'id'", ']', '=', 'user', '.', 'id', '# append to saved users', 'saved_users', '.', 'append', '(', 'user', ')', 'self', '.', 'verbose', '(', "'Saved user %s (%s) with email <%s>'", '%', '(', 'user', '.', 'username', ',', 'user', '.', 
'get_full_name', '(', ')', ',', 'user', '.', 'email', ')', ')', '# mark email address as confirmed if feature is enabled', 'if', 'EMAIL_CONFIRMATION', 'and', 'EmailAddress', '.', 'objects', '.', 'filter', '(', 'email', '=', 'user', '.', 'email', ')', '.', 'count', '(', ')', 'is', '0', ':', 'try', ':', 'email_address', '=', 'EmailAddress', '(', 'user', '=', 'user', ',', 'email', '=', 'user', '.', 'email', ',', 'verified', '=', 'True', ',', 'primary', '=', 'True', ')', 'email_address', '.', 'full_clean', '(', ')', 'email_address', '.', 'save', '(', ')', 'except', 'Exception', ':', 'tb', '=', 'traceback', '.', 'format_exc', '(', ')', 'self', '.', 'message', '(', "'Could not save email address for user %s, got exception:\\n\\n%s'", '%', '(', 'user', '.', 'username', ',', 'tb', ')', ')', 'self', '.', 'message', '(', "'saved %d users into local DB'", '%', 'len', '(', 'saved_users', ')', ')', 'self', '.', 'saved_users', '=', 'saved_users']
save users to local DB
['save', 'users', 'to', 'local', 'DB']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/oldimporter/management/commands/import_old_nodeshot.py#L392-L486
1,503
materialsproject/pymatgen
pymatgen/analysis/structure_prediction/dopant_predictor.py
get_dopants_from_shannon_radii
def get_dopants_from_shannon_radii(bonded_structure, num_dopants=5,
                                   match_oxi_sign=False):
    """
    Get dopant suggestions based on Shannon radii differences.

    Args:
        bonded_structure (StructureGraph): A pymatgen structure graph
            decorated with oxidation states. For example, generated using the
            CrystalNN.get_bonded_structure() method.
        num_dopants (int): The number of suggestions to return for
            n- and p-type dopants.
        match_oxi_sign (bool): Whether to force the dopant and original species
            to have the same sign of oxidation state. E.g. If the original site
            is in a negative charge state, then only negative dopants will be
            returned.

    Returns:
        (dict): Dopant suggestions, given as a dictionary with keys "n_type" and
        "p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:

        - "radii_diff": The difference between the Shannon radii of the species.
        - "dopant_species": The dopant species.
        - "original_species": The substituted species.
    """
    # get a list of all Specie for all elements in all their common oxid states
    all_species = [Specie(el, oxi)
                   for el in Element
                   for oxi in el.common_oxidation_states]

    # get a series of tuples with (coordination number, specie)
    cn_and_species = set((bonded_structure.get_coordination_of_site(i),
                          bonded_structure.structure[i].specie)
                         for i in range(bonded_structure.structure.num_sites))

    cn_to_radii_map = {}
    possible_dopants = []

    for cn, species in cn_and_species:
        cn_roman = _int_to_roman(cn)

        try:
            species_radius = species.get_shannon_radius(cn_roman)
        except KeyError:
            warnings.warn("Shannon radius not found for {} with coordination "
                          "number {}.\nSkipping...".format(species, cn))
            continue

        if cn not in cn_to_radii_map:
            cn_to_radii_map[cn] = _shannon_radii_from_cn(
                all_species, cn_roman, radius_to_compare=species_radius)

        shannon_radii = cn_to_radii_map[cn]

        possible_dopants += [{'radii_diff': p['radii_diff'],
                              'dopant_species': p['species'],
                              'original_species': species}
                             for p in shannon_radii]

    possible_dopants.sort(key=lambda x: abs(x['radii_diff']))

    return _get_dopants(possible_dopants, num_dopants, match_oxi_sign)
python
['def', 'get_dopants_from_shannon_radii', '(', 'bonded_structure', ',', 'num_dopants', '=', '5', ',', 'match_oxi_sign', '=', 'False', ')', ':', '# get a list of all Specie for all elements in all their common oxid states', 'all_species', '=', '[', 'Specie', '(', 'el', ',', 'oxi', ')', 'for', 'el', 'in', 'Element', 'for', 'oxi', 'in', 'el', '.', 'common_oxidation_states', ']', '# get a series of tuples with (coordination number, specie)', 'cn_and_species', '=', 'set', '(', '(', 'bonded_structure', '.', 'get_coordination_of_site', '(', 'i', ')', ',', 'bonded_structure', '.', 'structure', '[', 'i', ']', '.', 'specie', ')', 'for', 'i', 'in', 'range', '(', 'bonded_structure', '.', 'structure', '.', 'num_sites', ')', ')', 'cn_to_radii_map', '=', '{', '}', 'possible_dopants', '=', '[', ']', 'for', 'cn', ',', 'species', 'in', 'cn_and_species', ':', 'cn_roman', '=', '_int_to_roman', '(', 'cn', ')', 'try', ':', 'species_radius', '=', 'species', '.', 'get_shannon_radius', '(', 'cn_roman', ')', 'except', 'KeyError', ':', 'warnings', '.', 'warn', '(', '"Shannon radius not found for {} with coordination "', '"number {}.\\nSkipping..."', '.', 'format', '(', 'species', ',', 'cn', ')', ')', 'continue', 'if', 'cn', 'not', 'in', 'cn_to_radii_map', ':', 'cn_to_radii_map', '[', 'cn', ']', '=', '_shannon_radii_from_cn', '(', 'all_species', ',', 'cn_roman', ',', 'radius_to_compare', '=', 'species_radius', ')', 'shannon_radii', '=', 'cn_to_radii_map', '[', 'cn', ']', 'possible_dopants', '+=', '[', '{', "'radii_diff'", ':', 'p', '[', "'radii_diff'", ']', ',', "'dopant_species'", ':', 'p', '[', "'species'", ']', ',', "'original_species'", ':', 'species', '}', 'for', 'p', 'in', 'shannon_radii', ']', 'possible_dopants', '.', 'sort', '(', 'key', '=', 'lambda', 'x', ':', 'abs', '(', 'x', '[', "'radii_diff'", ']', ')', ')', 'return', '_get_dopants', '(', 'possible_dopants', ',', 'num_dopants', ',', 'match_oxi_sign', ')']
Get dopant suggestions based on Shannon radii differences. Args: bonded_structure (StructureGraph): A pymatgen structure graph decorated with oxidation states. For example, generated using the CrystalNN.get_bonded_structure() method. num_dopants (int): The number of suggestions to return for n- and p-type dopants. match_oxi_sign (bool): Whether to force the dopant and original species to have the same sign of oxidation state. E.g. If the original site is in a negative charge state, then only negative dopants will be returned. Returns: (dict): Dopant suggestions, given as a dictionary with keys "n_type" and "p_type". The suggestions for each doping type are given as a list of dictionaries, each with the keys: - "radii_diff": The difference between the Shannon radii of the species. - "dopant_species": The dopant species. - "original_species": The substituted species.
['Get', 'dopant', 'suggestions', 'based', 'on', 'Shannon', 'radii', 'differences', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_prediction/dopant_predictor.py#L54-L114
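The Args section states that the input graph can be produced with CrystalNN.get_bonded_structure(). A sketch of that surrounding workflow, assuming a hypothetical CIF file and pymatgen's built-in oxidation-state guesser:

from pymatgen.core.structure import Structure
from pymatgen.analysis.local_env import CrystalNN
from pymatgen.analysis.structure_prediction.dopant_predictor import \
    get_dopants_from_shannon_radii

# Hypothetical input file; the structure must carry oxidation states.
structure = Structure.from_file("GaAs.cif")
structure.add_oxidation_state_by_guess()

bonded_structure = CrystalNN().get_bonded_structure(structure)
dopants = get_dopants_from_shannon_radii(bonded_structure, num_dopants=3)
print(dopants["n_type"], dopants["p_type"])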
1,504
DataDog/integrations-core
network/datadog_checks/network/network.py
Network._add_conntrack_stats_metrics
def _add_conntrack_stats_metrics(self, conntrack_path, tags):
    """
    Parse the output of conntrack -S
    Add the parsed metrics
    """
    try:
        output, _, _ = get_subprocess_output(["sudo", conntrack_path, "-S"], self.log)
        # conntrack -S sample:
        # cpu=0 found=27644 invalid=19060 ignore=485633411 insert=0 insert_failed=1 \
        # drop=1 early_drop=0 error=0 search_restart=39936711
        # cpu=1 found=21960 invalid=17288 ignore=475938848 insert=0 insert_failed=1 \
        # drop=1 early_drop=0 error=0 search_restart=36983181
        lines = output.splitlines()
        for line in lines:
            cols = line.split()
            cpu_num = cols[0].split('=')[-1]
            cpu_tag = ['cpu:{}'.format(cpu_num)]
            cols = cols[1:]
            for cell in cols:
                metric, value = cell.split('=')
                self.monotonic_count('system.net.conntrack.{}'.format(metric),
                                     int(value), tags=tags + cpu_tag)
    except SubprocessOutputEmptyError:
        self.log.debug("Couldn't use {} to get conntrack stats".format(conntrack_path))
python
['def', '_add_conntrack_stats_metrics', '(', 'self', ',', 'conntrack_path', ',', 'tags', ')', ':', 'try', ':', 'output', ',', '_', ',', '_', '=', 'get_subprocess_output', '(', '[', '"sudo"', ',', 'conntrack_path', ',', '"-S"', ']', ',', 'self', '.', 'log', ')', '# conntrack -S sample:', '# cpu=0 found=27644 invalid=19060 ignore=485633411 insert=0 insert_failed=1 \\', '# drop=1 early_drop=0 error=0 search_restart=39936711', '# cpu=1 found=21960 invalid=17288 ignore=475938848 insert=0 insert_failed=1 \\', '# drop=1 early_drop=0 error=0 search_restart=36983181', 'lines', '=', 'output', '.', 'splitlines', '(', ')', 'for', 'line', 'in', 'lines', ':', 'cols', '=', 'line', '.', 'split', '(', ')', 'cpu_num', '=', 'cols', '[', '0', ']', '.', 'split', '(', "'='", ')', '[', '-', '1', ']', 'cpu_tag', '=', '[', "'cpu:{}'", '.', 'format', '(', 'cpu_num', ')', ']', 'cols', '=', 'cols', '[', '1', ':', ']', 'for', 'cell', 'in', 'cols', ':', 'metric', ',', 'value', '=', 'cell', '.', 'split', '(', "'='", ')', 'self', '.', 'monotonic_count', '(', "'system.net.conntrack.{}'", '.', 'format', '(', 'metric', ')', ',', 'int', '(', 'value', ')', ',', 'tags', '=', 'tags', '+', 'cpu_tag', ')', 'except', 'SubprocessOutputEmptyError', ':', 'self', '.', 'log', '.', 'debug', '(', '"Couldn\'t use {} to get conntrack stats"', '.', 'format', '(', 'conntrack_path', ')', ')']
Parse the output of conntrack -S Add the parsed metrics
['Parse', 'the', 'output', 'of', 'conntrack', '-', 'S', 'Add', 'the', 'parsed', 'metrics']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/network/datadog_checks/network/network.py#L462-L487
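For reference, the same key=value splitting applied to one sample line from the comment above, as a standalone sketch without the Datadog base-check machinery:

sample = "cpu=0 found=27644 invalid=19060 insert=0 insert_failed=1 drop=1"

cols = sample.split()
cpu_tag = ['cpu:{}'.format(cols[0].split('=')[-1])]
# Every remaining cell becomes a monotonic counter named system.net.conntrack.<key>.
metrics = {'system.net.conntrack.{}'.format(k): int(v)
           for k, v in (cell.split('=') for cell in cols[1:])}
print(cpu_tag, metrics)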
1,505
pyBookshelf/bookshelf
bookshelf/api_v1.py
enable_mesos_basic_authentication
def enable_mesos_basic_authentication(principal, password):
    """ enables and adds a new authorized principal """
    restart = False
    secrets_file = '/etc/mesos/secrets'
    secrets_entry = '%s %s' % (principal, password)
    if not file_contains(filename=secrets_file, text=secrets_entry, use_sudo=True):
        file_append(filename=secrets_file, text=secrets_entry, use_sudo=True)
        file_attribs(secrets_file, mode=700, sudo=True)
        restart = True

    # set new startup parameters for mesos-master
    with quiet():
        if secrets_file not in sudo('cat /etc/mesos-master/credentials'):
            sudo('echo %s > /etc/mesos-master/credentials' % secrets_file)
            restart = True

        if not exists('/etc/mesos-master/\?authenticate', use_sudo=True):
            sudo('touch /etc/mesos-master/\?authenticate')
            file_attribs('/etc/mesos-master/\?authenticate', mode=700, sudo=True)
            restart = True

    if restart:
        restart_service('mesos-master')
python
['def', 'enable_mesos_basic_authentication', '(', 'principal', ',', 'password', ')', ':', 'restart', '=', 'False', 'secrets_file', '=', "'/etc/mesos/secrets'", 'secrets_entry', '=', "'%s %s'", '%', '(', 'principal', ',', 'password', ')', 'if', 'not', 'file_contains', '(', 'filename', '=', 'secrets_file', ',', 'text', '=', 'secrets_entry', ',', 'use_sudo', '=', 'True', ')', ':', 'file_append', '(', 'filename', '=', 'secrets_file', ',', 'text', '=', 'secrets_entry', ',', 'use_sudo', '=', 'True', ')', 'file_attribs', '(', 'secrets_file', ',', 'mode', '=', '700', ',', 'sudo', '=', 'True', ')', 'restart', '=', 'True', '# set new startup parameters for mesos-master', 'with', 'quiet', '(', ')', ':', 'if', 'secrets_file', 'not', 'in', 'sudo', '(', "'cat /etc/mesos-master/credentials'", ')', ':', 'sudo', '(', "'echo %s > /etc/mesos-master/credentials'", '%', 'secrets_file', ')', 'restart', '=', 'True', 'if', 'not', 'exists', '(', "'/etc/mesos-master/\\?authenticate'", ',', 'use_sudo', '=', 'True', ')', ':', 'sudo', '(', "'touch /etc/mesos-master/\\?authenticate'", ')', 'file_attribs', '(', "'/etc/mesos-master/\\?authenticate'", ',', 'mode', '=', '700', ',', 'sudo', '=', 'True', ')', 'restart', '=', 'True', 'if', 'restart', ':', 'restart_service', '(', "'mesos-master'", ')']
enables and adds a new authorized principal
['enables', 'and', 'adds', 'a', 'new', 'authorized', 'principal']
train
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L965-L990
1,506
titusjan/argos
argos/config/abstractcti.py
jsonAsCti
def jsonAsCti(dct):
    """ Config tree item JSON decoding function.
        Returns a CTI given a dictionary of attributes.
        The full class name of desired CTI class should be in dct['_class_'].
    """
    if '_class_' in dct:
        full_class_name = dct['_class_']  # TODO: how to handle the full_class_name?
        cls = import_symbol(full_class_name)
        return cls.createFromJsonDict(dct)
    else:
        return dct
python
['def', 'jsonAsCti', '(', 'dct', ')', ':', 'if', "'_class_'", 'in', 'dct', ':', 'full_class_name', '=', 'dct', '[', "'_class_'", ']', '# TODO: how to handle the full_class_name?', 'cls', '=', 'import_symbol', '(', 'full_class_name', ')', 'return', 'cls', '.', 'createFromJsonDict', '(', 'dct', ')', 'else', ':', 'return', 'dct']
Config tree item JSON decoding function. Returns a CTI given a dictionary of attributes. The full class name of desired CTI class should be in dct['_class_'].
['Config', 'tree', 'item', 'JSON', 'decoding', 'function', '.', 'Returns', 'a', 'CTI', 'given', 'a', 'dictionary', 'of', 'attributes', '.', 'The', 'full', 'class', 'name', 'of', 'desired', 'CTI', 'class', 'should', 'be', 'in', 'dct', '[', '_class_', ']', '.']
train
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/abstractcti.py#L64-L73
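A decoder function with this shape is typically handed to the json module as an object_hook so that nested objects are converted as they are parsed. A usage sketch; the '_class_' payload below is a made-up placeholder, not an actual argos class path:

import json

config_json = '{"_class_": "some.package.MyCti", "nodeName": "root"}'

# Every decoded JSON object passes through jsonAsCti; dicts carrying a
# '_class_' key become config tree items, anything else is returned unchanged.
cti = json.loads(config_json, object_hook=jsonAsCti)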
1,507
openpermissions/perch
perch/migrations/user_22e76f4ff8bd41e19aa52839fc8f13a1.py
migrate_user
def migrate_user(instance):
    """
    Move User.organisations['global']['role'] to top-level property
    and remove verified flag
    """
    instance._resource.pop('verified', None)

    if 'role' in instance._resource:
        return instance

    global_org = instance.organisations.pop('global', {})
    instance.role = global_org.get('role', perch.User.roles.default.value)

    return instance
python
['def', 'migrate_user', '(', 'instance', ')', ':', 'instance', '.', '_resource', '.', 'pop', '(', "'verified'", ',', 'None', ')', 'if', "'role'", 'in', 'instance', '.', '_resource', ':', 'return', 'instance', 'global_org', '=', 'instance', '.', 'organisations', '.', 'pop', '(', "'global'", ',', '{', '}', ')', 'instance', '.', 'role', '=', 'global_org', '.', 'get', '(', "'role'", ',', 'perch', '.', 'User', '.', 'roles', '.', 'default', '.', 'value', ')', 'return', 'instance']
Move User.organisations['global']['role'] to top-level property and remove verified flag
['Move', 'User', '.', 'organisations', '[', 'global', ']', '[', 'role', ']', 'to', 'top', '-', 'level', 'property', 'and', 'remove', 'verified', 'flag']
train
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/migrations/user_22e76f4ff8bd41e19aa52839fc8f13a1.py#L10-L23
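To make the shape change concrete, a before/after sketch of the underlying user document; the field values are invented and only the keys mirror what the migration touches:

before = {
    'verified': True,
    'organisations': {'global': {'role': 'administrator'}},
}

after = {
    'role': 'administrator',   # promoted from organisations['global']['role']
    'organisations': {},       # the 'global' entry has been popped
}
# the 'verified' flag is dropped entirely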
1,508
hazelcast/hazelcast-python-client
hazelcast/proxy/multi_map.py
MultiMap.contains_key
def contains_key(self, key):
    """
    Determines whether this multimap contains an entry with the key.

    **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the
    actual implementations of __hash__ and __eq__ defined in key's class.**

    :param key: (object), the specified key.
    :return: (bool), ``true`` if this multimap contains an entry for the specified key.
    """
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_contains_key_codec, key_data, key=key_data,
                                      thread_id=thread_id())
python
['def', 'contains_key', '(', 'self', ',', 'key', ')', ':', 'check_not_none', '(', 'key', ',', '"key can\'t be None"', ')', 'key_data', '=', 'self', '.', '_to_data', '(', 'key', ')', 'return', 'self', '.', '_encode_invoke_on_key', '(', 'multi_map_contains_key_codec', ',', 'key_data', ',', 'key', '=', 'key_data', ',', 'thread_id', '=', 'thread_id', '(', ')', ')']
Determines whether this multimap contains an entry with the key. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the specified key. :return: (bool), ``true`` if this multimap contains an entry for the specified key.
['Determines', 'whether', 'this', 'multimap', 'contains', 'an', 'entry', 'with', 'the', 'key', '.']
train
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/multi_map.py#L55-L68
1,509
morpframework/morpfw
morpfw/interfaces.py
IStorageBase.search
def search(self, query: Optional[dict] = None, offset: Optional[int] = None,
           limit: Optional[int] = None,
           order_by: Union[None, list, tuple] = None) -> Sequence['IModel']:
    """return search result based on specified rulez query"""
    raise NotImplementedError
python
['def', 'search', '(', 'self', ',', 'query', ':', 'Optional', '[', 'dict', ']', '=', 'None', ',', 'offset', ':', 'Optional', '[', 'int', ']', '=', 'None', ',', 'limit', ':', 'Optional', '[', 'int', ']', '=', 'None', ',', 'order_by', ':', 'Union', '[', 'None', ',', 'list', ',', 'tuple', ']', '=', 'None', ')', '->', 'Sequence', '[', "'IModel'", ']', ':', 'raise', 'NotImplementedError']
return search result based on specified rulez query
['return', 'search', 'result', 'based', 'on', 'specified', 'rulez', 'query']
train
https://github.com/morpframework/morpfw/blob/803fbf29714e6f29456482f1cfbdbd4922b020b0/morpfw/interfaces.py#L137-L141
1,510
ninuxorg/nodeshot
nodeshot/community/mailing/models/outward.py
Outward.get_recipients
def get_recipients(self):
    """
    Determine recipients depending on selected filtering which can be either:
        * group based
        * layer based
        * user based

    Choosing "group" and "layer" filtering together has the effect of sending the message
    only to users for which the following conditions are both true:
        * have a node assigned to one of the selected layers
        * are part of any of the specified groups (eg: registered, community, trusted)

    The user based filtering has instead the effect of translating in an **OR** query.
    Here's a practical example:
    if selecting "group" and "user" filtering the message will be sent to all the users
    for which ANY of the following conditions is true:
        * are part of any of the specified groups (eg: registered, community, trusted)
        * selected users
    """
    # user model
    User = get_user_model()
    # prepare email list
    emails = []

    # the following code is a bit ugly. Considering the titanic amount of work required to build all
    # the cool functionalities that I have in my mind, I can't be bothered to waste time on making it nicer right now.
    # if you have ideas on how to improve it to make it cleaner and less cluttered, please join in
    # this method has unit tests written for it, therefore if you try to change it be sure to check unit tests do not fail after your changes
    # python manage.py test mailing

    # send to all case
    if not self.is_filtered:
        # retrieve only email DB column of all active users
        users = User.objects.filter(is_active=True).only('email')
        # loop over users list
        for user in users:
            # add email to the recipient list if not already there
            if user.email not in emails:
                emails += [user.email]
    else:
        # selected users
        if FILTERS.get('users') in self.filters:
            # retrieve selected users
            users = self.users.all().only('email')
            # loop over selected users
            for user in users:
                # add email to the recipient list if not already there
                if user.email not in emails:
                    emails += [user.email]

        # Q is a django object for "complex" filtering queries (not that complex in this case)
        # init empty Q object that will be needed in case of group filtering
        q = Q()
        q2 = Q()

        # if group filtering is checked
        if FILTERS.get('groups') in self.filters:
            # loop over each group
            for group in self.groups:
                # if not superusers
                if group != '0':
                    # add the group to the Q object
                    # this means that the query will look for users of that specific group
                    q = q | Q(groups=int(group))
                    q2 = q2 | Q(user__groups=int(group))
                else:
                    # this must be done manually because superusers is not a group but an attribute of the User model
                    q = q | Q(is_superuser=True)
                    q2 = q2 | Q(user__is_superuser=True)
            # plus users must be active
            q = q & Q(is_active=True)

        # if layer filtering is checked
        if FILTERS.get('layers') in self.filters:
            # retrieve non-external layers
            layers = self.layers.all().only('id')
            # init empty q3
            q3 = Q()
            # loop over layers to form q3 object
            for layer in layers:
                q3 = q3 | Q(layer=layer)
            # q2: user group if present
            # q3: layers
            # retrieve nodes
            nodes = Node.objects.filter(q2 & q3)
            # loop over nodes of a layer and get their email
            for node in nodes:
                # add email to the recipient list if not already there
                if node.user.email not in emails:
                    emails += [node.user.email]
        # else if group filtering is checked but not layers
        elif FILTERS.get('groups') in self.filters and not FILTERS.get('layers') in self.filters:
            # retrieve only email DB column of all active users
            users = User.objects.filter(q).only('email')
            # loop over users list
            for user in users:
                # add email to the recipient list if not already there
                if user.email not in emails:
                    emails += [user.email]

    return emails
python
['def', 'get_recipients', '(', 'self', ')', ':', '# user model', 'User', '=', 'get_user_model', '(', ')', '# prepare email list', 'emails', '=', '[', ']', '# the following code is a bit ugly. Considering the titanic amount of work required to build all', "# the cools functionalities that I have in my mind, I can't be bothered to waste time on making it nicer right now.", '# if you have ideas on how to improve it to make it cleaner and less cluttered, please join in', '# this method has unit tests written for it, therefore if you try to change it be sure to check unit tests do not fail after your changes', '# python manage.py test mailing', '# send to all case', 'if', 'not', 'self', '.', 'is_filtered', ':', '# retrieve only email DB column of all active users', 'users', '=', 'User', '.', 'objects', '.', 'filter', '(', 'is_active', '=', 'True', ')', '.', 'only', '(', "'email'", ')', '# loop over users list', 'for', 'user', 'in', 'users', ':', '# add email to the recipient list if not already there', 'if', 'user', '.', 'email', 'not', 'in', 'emails', ':', 'emails', '+=', '[', 'user', '.', 'email', ']', 'else', ':', '# selected users', 'if', 'FILTERS', '.', 'get', '(', "'users'", ')', 'in', 'self', '.', 'filters', ':', '# retrieve selected users', 'users', '=', 'self', '.', 'users', '.', 'all', '(', ')', '.', 'only', '(', "'email'", ')', '# loop over selected users', 'for', 'user', 'in', 'users', ':', '# add email to the recipient list if not already there', 'if', 'user', '.', 'email', 'not', 'in', 'emails', ':', 'emails', '+=', '[', 'user', '.', 'email', ']', '# Q is a django object for "complex" filtering queries (not that complex in this case)', '# init empty Q object that will be needed in case of group filtering', 'q', '=', 'Q', '(', ')', 'q2', '=', 'Q', '(', ')', '# if group filtering is checked', 'if', 'FILTERS', '.', 'get', '(', "'groups'", ')', 'in', 'self', '.', 'filters', ':', '# loop over each group', 'for', 'group', 'in', 'self', '.', 'groups', ':', '# if not superusers', 'if', 'group', '!=', "'0'", ':', '# add the group to the Q object', '# this means that the query will look for users of that specific group', 'q', '=', 'q', '|', 'Q', '(', 'groups', '=', 'int', '(', 'group', ')', ')', 'q2', '=', 'q2', '|', 'Q', '(', 'user__groups', '=', 'int', '(', 'group', ')', ')', 'else', ':', '# this must be done manually because superusers is not a group but an attribute of the User model', 'q', '=', 'q', '|', 'Q', '(', 'is_superuser', '=', 'True', ')', 'q2', '=', 'q2', '|', 'Q', '(', 'user__is_superuser', '=', 'True', ')', '# plus users must be active', 'q', '=', 'q', '&', 'Q', '(', 'is_active', '=', 'True', ')', '# if layer filtering is checked', 'if', 'FILTERS', '.', 'get', '(', "'layers'", ')', 'in', 'self', '.', 'filters', ':', '# retrieve non-external layers', 'layers', '=', 'self', '.', 'layers', '.', 'all', '(', ')', '.', 'only', '(', "'id'", ')', '# init empty q3', 'q3', '=', 'Q', '(', ')', '# loop over layers to form q3 object', 'for', 'layer', 'in', 'layers', ':', 'q3', '=', 'q3', '|', 'Q', '(', 'layer', '=', 'layer', ')', '# q2: user group if present', '# q3: layers', '# retrieve nodes', 'nodes', '=', 'Node', '.', 'objects', '.', 'filter', '(', 'q2', '&', 'q3', ')', '# loop over nodes of a layer and get their email', 'for', 'node', 'in', 'nodes', ':', '# add email to the recipient list if not already there', 'if', 'node', '.', 'user', '.', 'email', 'not', 'in', 'emails', ':', 'emails', '+=', '[', 'node', '.', 'user', '.', 'email', ']', '# else if group filterins is checked but 
not layers', 'elif', 'FILTERS', '.', 'get', '(', "'groups'", ')', 'in', 'self', '.', 'filters', 'and', 'not', 'FILTERS', '.', 'get', '(', "'layers'", ')', 'in', 'self', '.', 'filters', ':', '# retrieve only email DB column of all active users', 'users', '=', 'User', '.', 'objects', '.', 'filter', '(', 'q', ')', '.', 'only', '(', "'email'", ')', '# loop over users list', 'for', 'user', 'in', 'users', ':', '# add email to the recipient list if not already there', 'if', 'user', '.', 'email', 'not', 'in', 'emails', ':', 'emails', '+=', '[', 'user', '.', 'email', ']', 'return', 'emails']
Determine recipients depending on selected filtering which can be either: * group based * layer based * user based Choosing "group" and "layer" filtering together has the effect of sending the message only to users for which the following conditions are both true: * have a node assigned to one of the selected layers * are part of any of the specified groups (eg: registered, community, trusted) The user based filtering has instead the effect of translating in an **OR** query. Here's a practical example: if selecting "group" and "user" filtering the message will be sent to all the users for which ANY of the following conditions is true: * are part of any of the specified groups (eg: registered, community, trusted) * selected users
['Determine', 'recipients', 'depending', 'on', 'selected', 'filtering', 'which', 'can', 'be', 'either', ':', '*', 'group', 'based', '*', 'layer', 'based', '*', 'user', 'based']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/mailing/models/outward.py#L67-L166
1,511
Rapptz/discord.py
discord/guild.py
Guild.create_text_channel
async def create_text_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
    """|coro|

    Creates a :class:`TextChannel` for the guild.

    Note that you need the :attr:`~Permissions.manage_channels` permission
    to create the channel.

    The ``overwrites`` parameter can be used to create a 'secret'
    channel upon creation. This parameter expects a :class:`dict` of
    overwrites with the target (either a :class:`Member` or a :class:`Role`)
    as the key and a :class:`PermissionOverwrite` as the value.

    .. note::

        Creating a channel of a specified position will not update the position of
        other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
        will be required to update the position of the channel in the channel list.

    Examples
    ----------

    Creating a basic channel:

    .. code-block:: python3

        channel = await guild.create_text_channel('cool-channel')

    Creating a "secret" channel:

    .. code-block:: python3

        overwrites = {
            guild.default_role: discord.PermissionOverwrite(read_messages=False),
            guild.me: discord.PermissionOverwrite(read_messages=True)
        }

        channel = await guild.create_text_channel('secret', overwrites=overwrites)

    Parameters
    -----------
    name: :class:`str`
        The channel's name.
    overwrites
        A :class:`dict` of target (either a role or a member) to
        :class:`PermissionOverwrite` to apply upon creation of a channel.
        Useful for creating secret channels.
    category: Optional[:class:`CategoryChannel`]
        The category to place the newly created channel under.
        The permissions will be automatically synced to category if no
        overwrites are provided.
    position: :class:`int`
        The position in the channel list. This is a number that starts
        at 0. e.g. the top channel is position 0.
    topic: Optional[:class:`str`]
        The new channel's topic.
    slowmode_delay: :class:`int`
        Specifies the slowmode rate limit for user in this channel.
        The maximum value possible is `120`.
    nsfw: :class:`bool`
        To mark the channel as NSFW or not.
    reason: Optional[:class:`str`]
        The reason for creating this channel. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to create this channel.
    HTTPException
        Creating the channel failed.
    InvalidArgument
        The permission overwrite information is not in proper form.

    Returns
    -------
    :class:`TextChannel`
        The channel that was just created.
    """
    data = await self._create_channel(name, overwrites, ChannelType.text, category, reason=reason, **options)
    channel = TextChannel(state=self._state, guild=self, data=data)

    # temporarily add to the cache
    self._channels[channel.id] = channel
    return channel
python
['async', 'def', 'create_text_channel', '(', 'self', ',', 'name', ',', '*', ',', 'overwrites', '=', 'None', ',', 'category', '=', 'None', ',', 'reason', '=', 'None', ',', '*', '*', 'options', ')', ':', 'data', '=', 'await', 'self', '.', '_create_channel', '(', 'name', ',', 'overwrites', ',', 'ChannelType', '.', 'text', ',', 'category', ',', 'reason', '=', 'reason', ',', '*', '*', 'options', ')', 'channel', '=', 'TextChannel', '(', 'state', '=', 'self', '.', '_state', ',', 'guild', '=', 'self', ',', 'data', '=', 'data', ')', '# temporarily add to the cache', 'self', '.', '_channels', '[', 'channel', '.', 'id', ']', '=', 'channel', 'return', 'channel']
|coro| Creates a :class:`TextChannel` for the guild. Note that you need the :attr:`~Permissions.manage_channels` permission to create the channel. The ``overwrites`` parameter can be used to create a 'secret' channel upon creation. This parameter expects a :class:`dict` of overwrites with the target (either a :class:`Member` or a :class:`Role`) as the key and a :class:`PermissionOverwrite` as the value. .. note:: Creating a channel of a specified position will not update the position of other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit` will be required to update the position of the channel in the channel list. Examples ---------- Creating a basic channel: .. code-block:: python3 channel = await guild.create_text_channel('cool-channel') Creating a "secret" channel: .. code-block:: python3 overwrites = { guild.default_role: discord.PermissionOverwrite(read_messages=False), guild.me: discord.PermissionOverwrite(read_messages=True) } channel = await guild.create_text_channel('secret', overwrites=overwrites) Parameters ----------- name: :class:`str` The channel's name. overwrites A :class:`dict` of target (either a role or a member) to :class:`PermissionOverwrite` to apply upon creation of a channel. Useful for creating secret channels. category: Optional[:class:`CategoryChannel`] The category to place the newly created channel under. The permissions will be automatically synced to category if no overwrites are provided. position: :class:`int` The position in the channel list. This is a number that starts at 0. e.g. the top channel is position 0. topic: Optional[:class:`str`] The new channel's topic. slowmode_delay: :class:`int` Specifies the slowmode rate limit for user in this channel. The maximum value possible is `120`. nsfw: :class:`bool` To mark the channel as NSFW or not. reason: Optional[:class:`str`] The reason for creating this channel. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to create this channel. HTTPException Creating the channel failed. InvalidArgument The permission overwrite information is not in proper form. Returns ------- :class:`TextChannel` The channel that was just created.
['|coro|']
train
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/guild.py#L622-L705
1,512
ipinfo/python
ipinfo/handler.py
Handler.getDetails
def getDetails(self, ip_address=None):
    """Get details for specified IP address as a Details object."""
    raw_details = self._requestDetails(ip_address)
    raw_details['country_name'] = self.countries.get(raw_details.get('country'))
    raw_details['ip_address'] = ipaddress.ip_address(raw_details.get('ip'))
    raw_details['latitude'], raw_details['longitude'] = self._read_coords(raw_details.get('loc'))
    return Details(raw_details)
python
['def', 'getDetails', '(', 'self', ',', 'ip_address', '=', 'None', ')', ':', 'raw_details', '=', 'self', '.', '_requestDetails', '(', 'ip_address', ')', 'raw_details', '[', "'country_name'", ']', '=', 'self', '.', 'countries', '.', 'get', '(', 'raw_details', '.', 'get', '(', "'country'", ')', ')', 'raw_details', '[', "'ip_address'", ']', '=', 'ipaddress', '.', 'ip_address', '(', 'raw_details', '.', 'get', '(', "'ip'", ')', ')', 'raw_details', '[', "'latitude'", ']', ',', 'raw_details', '[', "'longitude'", ']', '=', 'self', '.', '_read_coords', '(', 'raw_details', '.', 'get', '(', "'loc'", ')', ')', 'return', 'Details', '(', 'raw_details', ')']
Get details for specified IP address as a Details object.
['Get', 'details', 'for', 'specified', 'IP', 'address', 'as', 'a', 'Details', 'object', '.']
train
https://github.com/ipinfo/python/blob/62fef9136069eab280806cc772dc578d3f1d8d63/ipinfo/handler.py#L44-L50
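A minimal usage sketch for the ipinfo record above. The access token, the IP address, and the `ipinfo.getHandler` factory are assumptions for illustration; only `Handler.getDetails` itself comes from the row.

```python
import ipinfo

# hypothetical token; lookups also work unauthenticated on the free tier
handler = ipinfo.getHandler("YOUR_TOKEN")

# getDetails() returns a Details object whose country_name, ip_address,
# latitude and longitude are filled in exactly as the function above shows
details = handler.getDetails("8.8.8.8")
print(details.country_name, details.latitude, details.longitude)
```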
1,513
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
MAVLink.simstate_encode
def simstate_encode(self, roll, pitch, yaw, xacc, yacc, zacc, xgyro, ygyro, zgyro, lat, lng): ''' Status of simulation environment, if used roll : Roll angle (rad) (float) pitch : Pitch angle (rad) (float) yaw : Yaw angle (rad) (float) xacc : X acceleration m/s/s (float) yacc : Y acceleration m/s/s (float) zacc : Z acceleration m/s/s (float) xgyro : Angular speed around X axis rad/s (float) ygyro : Angular speed around Y axis rad/s (float) zgyro : Angular speed around Z axis rad/s (float) lat : Latitude in degrees * 1E7 (int32_t) lng : Longitude in degrees * 1E7 (int32_t) ''' return MAVLink_simstate_message(roll, pitch, yaw, xacc, yacc, zacc, xgyro, ygyro, zgyro, lat, lng)
python
def simstate_encode(self, roll, pitch, yaw, xacc, yacc, zacc, xgyro, ygyro, zgyro, lat, lng): ''' Status of simulation environment, if used roll : Roll angle (rad) (float) pitch : Pitch angle (rad) (float) yaw : Yaw angle (rad) (float) xacc : X acceleration m/s/s (float) yacc : Y acceleration m/s/s (float) zacc : Z acceleration m/s/s (float) xgyro : Angular speed around X axis rad/s (float) ygyro : Angular speed around Y axis rad/s (float) zgyro : Angular speed around Z axis rad/s (float) lat : Latitude in degrees * 1E7 (int32_t) lng : Longitude in degrees * 1E7 (int32_t) ''' return MAVLink_simstate_message(roll, pitch, yaw, xacc, yacc, zacc, xgyro, ygyro, zgyro, lat, lng)
['def', 'simstate_encode', '(', 'self', ',', 'roll', ',', 'pitch', ',', 'yaw', ',', 'xacc', ',', 'yacc', ',', 'zacc', ',', 'xgyro', ',', 'ygyro', ',', 'zgyro', ',', 'lat', ',', 'lng', ')', ':', 'return', 'MAVLink_simstate_message', '(', 'roll', ',', 'pitch', ',', 'yaw', ',', 'xacc', ',', 'yacc', ',', 'zacc', ',', 'xgyro', ',', 'ygyro', ',', 'zgyro', ',', 'lat', ',', 'lng', ')']
Status of simulation environment, if used roll : Roll angle (rad) (float) pitch : Pitch angle (rad) (float) yaw : Yaw angle (rad) (float) xacc : X acceleration m/s/s (float) yacc : Y acceleration m/s/s (float) zacc : Z acceleration m/s/s (float) xgyro : Angular speed around X axis rad/s (float) ygyro : Angular speed around Y axis rad/s (float) zgyro : Angular speed around Z axis rad/s (float) lat : Latitude in degrees * 1E7 (int32_t) lng : Longitude in degrees * 1E7 (int32_t)
['Status', 'of', 'simulation', 'environment', 'if', 'used']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10072-L10089
1,514
ceph/ceph-deploy
ceph_deploy/util/net.py
get_chacra_repo
def get_chacra_repo(shaman_url): """ From a Shaman URL, get the chacra url for a repository, read the contents that point to the repo and return it as a string. """ shaman_response = get_request(shaman_url) chacra_url = shaman_response.geturl() chacra_response = get_request(chacra_url) return chacra_response.read()
python
def get_chacra_repo(shaman_url): """ From a Shaman URL, get the chacra url for a repository, read the contents that point to the repo and return it as a string. """ shaman_response = get_request(shaman_url) chacra_url = shaman_response.geturl() chacra_response = get_request(chacra_url) return chacra_response.read()
['def', 'get_chacra_repo', '(', 'shaman_url', ')', ':', 'shaman_response', '=', 'get_request', '(', 'shaman_url', ')', 'chacra_url', '=', 'shaman_response', '.', 'geturl', '(', ')', 'chacra_response', '=', 'get_request', '(', 'chacra_url', ')', 'return', 'chacra_response', '.', 'read', '(', ')']
From a Shaman URL, get the chacra url for a repository, read the contents that point to the repo and return it as a string.
['From', 'a', 'Shaman', 'URL', 'get', 'the', 'chacra', 'url', 'for', 'a', 'repository', 'read', 'the', 'contents', 'that', 'point', 'to', 'the', 'repo', 'and', 'return', 'it', 'as', 'a', 'string', '.']
train
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/net.py#L390-L399
1,515
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.segments
def segments(self): """ A list of `Segment` objects. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute. """ segments = [] for label, slc in zip(self.labels, self.slices): segments.append(Segment(self.data, label, slc, self.get_area(label))) return segments
python
def segments(self): """ A list of `Segment` objects. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute. """ segments = [] for label, slc in zip(self.labels, self.slices): segments.append(Segment(self.data, label, slc, self.get_area(label))) return segments
['def', 'segments', '(', 'self', ')', ':', 'segments', '=', '[', ']', 'for', 'label', ',', 'slc', 'in', 'zip', '(', 'self', '.', 'labels', ',', 'self', '.', 'slices', ')', ':', 'segments', '.', 'append', '(', 'Segment', '(', 'self', '.', 'data', ',', 'label', ',', 'slc', ',', 'self', '.', 'get_area', '(', 'label', ')', ')', ')', 'return', 'segments']
A list of `Segment` objects. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute.
['A', 'list', 'of', 'Segment', 'objects', '.']
train
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L258-L271
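A short sketch of how the `segments` property in the row above might be exercised; the toy label array is invented for illustration.

```python
import numpy as np
from photutils.segmentation import SegmentationImage

# toy segmentation map with two labeled regions (labels 1 and 2, 0 = background)
data = np.array([[1, 1, 0, 0],
                 [1, 0, 0, 2],
                 [0, 0, 2, 2]])
segm = SegmentationImage(data)

# each entry is a Segment built from (data, label, slice, area), as above
for segment in segm.segments:
    print(segment.label, segment.area)
```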
1,516
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.guess_mime_type
def guess_mime_type(self, path): """ Guess an appropriate MIME type based on the extension of the provided path. :param str path: The of the file to analyze. :return: The guessed MIME type of the default if non are found. :rtype: str """ _, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() return self.extensions_map[ext if ext in self.extensions_map else '']
python
def guess_mime_type(self, path): """ Guess an appropriate MIME type based on the extension of the provided path. :param str path: The of the file to analyze. :return: The guessed MIME type of the default if non are found. :rtype: str """ _, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() return self.extensions_map[ext if ext in self.extensions_map else '']
['def', 'guess_mime_type', '(', 'self', ',', 'path', ')', ':', '_', ',', 'ext', '=', 'posixpath', '.', 'splitext', '(', 'path', ')', 'if', 'ext', 'in', 'self', '.', 'extensions_map', ':', 'return', 'self', '.', 'extensions_map', '[', 'ext', ']', 'ext', '=', 'ext', '.', 'lower', '(', ')', 'return', 'self', '.', 'extensions_map', '[', 'ext', 'if', 'ext', 'in', 'self', '.', 'extensions_map', 'else', "''", ']']
Guess an appropriate MIME type based on the extension of the provided path. :param str path: The of the file to analyze. :return: The guessed MIME type of the default if non are found. :rtype: str
['Guess', 'an', 'appropriate', 'MIME', 'type', 'based', 'on', 'the', 'extension', 'of', 'the', 'provided', 'path', '.']
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1135-L1148
1,517
pixelogik/NearPy
nearpy/engine.py
Engine._get_candidates
def _get_candidates(self, v): """ Collect candidates from all buckets from all hashes """ candidates = [] for lshash in self.lshashes: for bucket_key in lshash.hash_vector(v, querying=True): bucket_content = self.storage.get_bucket( lshash.hash_name, bucket_key, ) #print 'Bucket %s size %d' % (bucket_key, len(bucket_content)) candidates.extend(bucket_content) return candidates
python
def _get_candidates(self, v): """ Collect candidates from all buckets from all hashes """ candidates = [] for lshash in self.lshashes: for bucket_key in lshash.hash_vector(v, querying=True): bucket_content = self.storage.get_bucket( lshash.hash_name, bucket_key, ) #print 'Bucket %s size %d' % (bucket_key, len(bucket_content)) candidates.extend(bucket_content) return candidates
['def', '_get_candidates', '(', 'self', ',', 'v', ')', ':', 'candidates', '=', '[', ']', 'for', 'lshash', 'in', 'self', '.', 'lshashes', ':', 'for', 'bucket_key', 'in', 'lshash', '.', 'hash_vector', '(', 'v', ',', 'querying', '=', 'True', ')', ':', 'bucket_content', '=', 'self', '.', 'storage', '.', 'get_bucket', '(', 'lshash', '.', 'hash_name', ',', 'bucket_key', ',', ')', "#print 'Bucket %s size %d' % (bucket_key, len(bucket_content))", 'candidates', '.', 'extend', '(', 'bucket_content', ')', 'return', 'candidates']
Collect candidates from all buckets from all hashes
['Collect', 'candidates', 'from', 'all', 'buckets', 'from', 'all', 'hashes']
train
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/engine.py#L180-L191
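`_get_candidates` is an internal helper; the public NearPy flow that ends up calling it looks roughly like this sketch, with the dimensionality and vectors made up.

```python
import numpy as np
from nearpy import Engine
from nearpy.hashes import RandomBinaryProjections

dim = 20
# one LSH hash; its buckets are what _get_candidates later collects from
engine = Engine(dim, lshashes=[RandomBinaryProjections('rbp', 10)])

for i in range(100):
    engine.store_vector(np.random.randn(dim), 'vec_%d' % i)

# neighbours() hashes the query, gathers the bucket candidates, then ranks them
neighbours = engine.neighbours(np.random.randn(dim))
```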
1,518
mongodb/mongo-python-driver
pymongo/mongo_client.py
MongoClient.start_session
def start_session(self, causal_consistency=True, default_transaction_options=None): """Start a logical session. This method takes the same parameters as :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. Requires MongoDB 3.6. It is an error to call :meth:`start_session` if this client has been authenticated to multiple databases using the deprecated method :meth:`~pymongo.database.Database.authenticate`. A :class:`~pymongo.client_session.ClientSession` may only be used with the MongoClient that started it. :Returns: An instance of :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6 """ return self.__start_session( False, causal_consistency=causal_consistency, default_transaction_options=default_transaction_options)
python
def start_session(self, causal_consistency=True, default_transaction_options=None): """Start a logical session. This method takes the same parameters as :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. Requires MongoDB 3.6. It is an error to call :meth:`start_session` if this client has been authenticated to multiple databases using the deprecated method :meth:`~pymongo.database.Database.authenticate`. A :class:`~pymongo.client_session.ClientSession` may only be used with the MongoClient that started it. :Returns: An instance of :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6 """ return self.__start_session( False, causal_consistency=causal_consistency, default_transaction_options=default_transaction_options)
['def', 'start_session', '(', 'self', ',', 'causal_consistency', '=', 'True', ',', 'default_transaction_options', '=', 'None', ')', ':', 'return', 'self', '.', '__start_session', '(', 'False', ',', 'causal_consistency', '=', 'causal_consistency', ',', 'default_transaction_options', '=', 'default_transaction_options', ')']
Start a logical session. This method takes the same parameters as :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. Requires MongoDB 3.6. It is an error to call :meth:`start_session` if this client has been authenticated to multiple databases using the deprecated method :meth:`~pymongo.database.Database.authenticate`. A :class:`~pymongo.client_session.ClientSession` may only be used with the MongoClient that started it. :Returns: An instance of :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6
['Start', 'a', 'logical', 'session', '.']
train
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/mongo_client.py#L1736-L1760
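A brief sketch of the session API described in the row above, against a hypothetical local MongoDB; the database and collection names are placeholders.

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")

# ClientSession is a context manager; passing it to operations ties them
# to the session's causal-consistency guarantees
with client.start_session(causal_consistency=True) as session:
    client.test_db.items.insert_one({"x": 1}, session=session)
    doc = client.test_db.items.find_one({"x": 1}, session=session)
```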
1,519
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.get_descriptor_for_layer
def get_descriptor_for_layer(self, layer): """ Returns the standard JSON descriptor for the layer. There is a lot of usefule information in there. """ if not layer in self._layer_descriptor_cache: params = {'f': 'pjson'} if self.token: params['token'] = self.token response = requests.get(self._build_request(layer), params=params) self._layer_descriptor_cache[layer] = response.json() return self._layer_descriptor_cache[layer]
python
def get_descriptor_for_layer(self, layer): """ Returns the standard JSON descriptor for the layer. There is a lot of usefule information in there. """ if not layer in self._layer_descriptor_cache: params = {'f': 'pjson'} if self.token: params['token'] = self.token response = requests.get(self._build_request(layer), params=params) self._layer_descriptor_cache[layer] = response.json() return self._layer_descriptor_cache[layer]
['def', 'get_descriptor_for_layer', '(', 'self', ',', 'layer', ')', ':', 'if', 'not', 'layer', 'in', 'self', '.', '_layer_descriptor_cache', ':', 'params', '=', '{', "'f'", ':', "'pjson'", '}', 'if', 'self', '.', 'token', ':', 'params', '[', "'token'", ']', '=', 'self', '.', 'token', 'response', '=', 'requests', '.', 'get', '(', 'self', '.', '_build_request', '(', 'layer', ')', ',', 'params', '=', 'params', ')', 'self', '.', '_layer_descriptor_cache', '[', 'layer', ']', '=', 'response', '.', 'json', '(', ')', 'return', 'self', '.', '_layer_descriptor_cache', '[', 'layer', ']']
Returns the standard JSON descriptor for the layer. There is a lot of usefule information in there.
['Returns', 'the', 'standard', 'JSON', 'descriptor', 'for', 'the', 'layer', '.', 'There', 'is', 'a', 'lot', 'of', 'usefule', 'information', 'in', 'there', '.']
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L106-L117
1,520
flowroute/txjason
txjason/handler.py
Handler.addToService
def addToService(self, service, namespace=None, seperator='.'): """ Add this Handler's exported methods to an RPC Service instance. """ if namespace is None: namespace = [] if isinstance(namespace, basestring): namespace = [namespace] for n, m in inspect.getmembers(self, inspect.ismethod): if hasattr(m, 'export_rpc'): try: name = seperator.join(namespace + m.export_rpc) except TypeError: name = seperator.join(namespace + [m.export_rpc]) service.add(m, name)
python
def addToService(self, service, namespace=None, seperator='.'): """ Add this Handler's exported methods to an RPC Service instance. """ if namespace is None: namespace = [] if isinstance(namespace, basestring): namespace = [namespace] for n, m in inspect.getmembers(self, inspect.ismethod): if hasattr(m, 'export_rpc'): try: name = seperator.join(namespace + m.export_rpc) except TypeError: name = seperator.join(namespace + [m.export_rpc]) service.add(m, name)
['def', 'addToService', '(', 'self', ',', 'service', ',', 'namespace', '=', 'None', ',', 'seperator', '=', "'.'", ')', ':', 'if', 'namespace', 'is', 'None', ':', 'namespace', '=', '[', ']', 'if', 'isinstance', '(', 'namespace', ',', 'basestring', ')', ':', 'namespace', '=', '[', 'namespace', ']', 'for', 'n', ',', 'm', 'in', 'inspect', '.', 'getmembers', '(', 'self', ',', 'inspect', '.', 'ismethod', ')', ':', 'if', 'hasattr', '(', 'm', ',', "'export_rpc'", ')', ':', 'try', ':', 'name', '=', 'seperator', '.', 'join', '(', 'namespace', '+', 'm', '.', 'export_rpc', ')', 'except', 'TypeError', ':', 'name', '=', 'seperator', '.', 'join', '(', 'namespace', '+', '[', 'm', '.', 'export_rpc', ']', ')', 'service', '.', 'add', '(', 'm', ',', 'name', ')']
Add this Handler's exported methods to an RPC Service instance.
['Add', 'this', 'Handler', 's', 'exported', 'methods', 'to', 'an', 'RPC', 'Service', 'instance', '.']
train
https://github.com/flowroute/txjason/blob/4865bd716847dcbab99acc69daa0c44ae3cc5b89/txjason/handler.py#L44-L59
1,521
materialsproject/pymatgen
pymatgen/analysis/surface_analysis.py
NanoscaleStability.wulff_gform_and_r
def wulff_gform_and_r(self, wulffshape, bulk_entry, r, from_sphere_area=False, r_units="nanometers", e_units="keV", normalize=False, scale_per_atom=False): """ Calculates the formation energy of the particle with arbitrary radius r. Args: wulffshape (WulffShape): Initial, unscaled WulffShape bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk. r (float (Ang)): Arbitrary effective radius of the WulffShape from_sphere_area (bool): There are two ways to calculate the bulk formation energy. Either by treating the volume and thus surface area of the particle as a perfect sphere, or as a Wulff shape. r_units (str): Can be nanometers or Angstrom e_units (str): Can be keV or eV normalize (bool): Whether or not to normalize energy by volume scale_per_atom (True): Whether or not to normalize by number of atoms in the particle Returns: particle formation energy (float in keV), effective radius """ # Set up miller_se_dict = wulffshape.miller_energy_dict new_wulff = self.scaled_wulff(wulffshape, r) new_wulff_area = new_wulff.miller_area_dict # calculate surface energy of the particle if not from_sphere_area: # By approximating the particle as a Wulff shape w_vol = new_wulff.volume tot_wulff_se = 0 for hkl in new_wulff_area.keys(): tot_wulff_se += miller_se_dict[hkl] * new_wulff_area[hkl] Ebulk = self.bulk_gform(bulk_entry) * w_vol new_r = new_wulff.effective_radius else: # By approximating the particle as a perfect sphere w_vol = (4 / 3) * np.pi * r ** 3 sphere_sa = 4 * np.pi * r ** 2 tot_wulff_se = wulffshape.weighted_surface_energy * sphere_sa Ebulk = self.bulk_gform(bulk_entry) * w_vol new_r = r new_r = new_r / 10 if r_units == "nanometers" else new_r e = (Ebulk + tot_wulff_se) e = e / 1000 if e_units == "keV" else e e = e / ((4/3)*np.pi*new_r**3) if normalize else e bulk_struct = bulk_entry.structure density = len(bulk_struct)/bulk_struct.lattice.volume e = e/(density*w_vol) if scale_per_atom else e return e, new_r
python
def wulff_gform_and_r(self, wulffshape, bulk_entry, r, from_sphere_area=False, r_units="nanometers", e_units="keV", normalize=False, scale_per_atom=False): """ Calculates the formation energy of the particle with arbitrary radius r. Args: wulffshape (WulffShape): Initial, unscaled WulffShape bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk. r (float (Ang)): Arbitrary effective radius of the WulffShape from_sphere_area (bool): There are two ways to calculate the bulk formation energy. Either by treating the volume and thus surface area of the particle as a perfect sphere, or as a Wulff shape. r_units (str): Can be nanometers or Angstrom e_units (str): Can be keV or eV normalize (bool): Whether or not to normalize energy by volume scale_per_atom (True): Whether or not to normalize by number of atoms in the particle Returns: particle formation energy (float in keV), effective radius """ # Set up miller_se_dict = wulffshape.miller_energy_dict new_wulff = self.scaled_wulff(wulffshape, r) new_wulff_area = new_wulff.miller_area_dict # calculate surface energy of the particle if not from_sphere_area: # By approximating the particle as a Wulff shape w_vol = new_wulff.volume tot_wulff_se = 0 for hkl in new_wulff_area.keys(): tot_wulff_se += miller_se_dict[hkl] * new_wulff_area[hkl] Ebulk = self.bulk_gform(bulk_entry) * w_vol new_r = new_wulff.effective_radius else: # By approximating the particle as a perfect sphere w_vol = (4 / 3) * np.pi * r ** 3 sphere_sa = 4 * np.pi * r ** 2 tot_wulff_se = wulffshape.weighted_surface_energy * sphere_sa Ebulk = self.bulk_gform(bulk_entry) * w_vol new_r = r new_r = new_r / 10 if r_units == "nanometers" else new_r e = (Ebulk + tot_wulff_se) e = e / 1000 if e_units == "keV" else e e = e / ((4/3)*np.pi*new_r**3) if normalize else e bulk_struct = bulk_entry.structure density = len(bulk_struct)/bulk_struct.lattice.volume e = e/(density*w_vol) if scale_per_atom else e return e, new_r
['def', 'wulff_gform_and_r', '(', 'self', ',', 'wulffshape', ',', 'bulk_entry', ',', 'r', ',', 'from_sphere_area', '=', 'False', ',', 'r_units', '=', '"nanometers"', ',', 'e_units', '=', '"keV"', ',', 'normalize', '=', 'False', ',', 'scale_per_atom', '=', 'False', ')', ':', '# Set up', 'miller_se_dict', '=', 'wulffshape', '.', 'miller_energy_dict', 'new_wulff', '=', 'self', '.', 'scaled_wulff', '(', 'wulffshape', ',', 'r', ')', 'new_wulff_area', '=', 'new_wulff', '.', 'miller_area_dict', '# calculate surface energy of the particle', 'if', 'not', 'from_sphere_area', ':', '# By approximating the particle as a Wulff shape', 'w_vol', '=', 'new_wulff', '.', 'volume', 'tot_wulff_se', '=', '0', 'for', 'hkl', 'in', 'new_wulff_area', '.', 'keys', '(', ')', ':', 'tot_wulff_se', '+=', 'miller_se_dict', '[', 'hkl', ']', '*', 'new_wulff_area', '[', 'hkl', ']', 'Ebulk', '=', 'self', '.', 'bulk_gform', '(', 'bulk_entry', ')', '*', 'w_vol', 'new_r', '=', 'new_wulff', '.', 'effective_radius', 'else', ':', '# By approximating the particle as a perfect sphere', 'w_vol', '=', '(', '4', '/', '3', ')', '*', 'np', '.', 'pi', '*', 'r', '**', '3', 'sphere_sa', '=', '4', '*', 'np', '.', 'pi', '*', 'r', '**', '2', 'tot_wulff_se', '=', 'wulffshape', '.', 'weighted_surface_energy', '*', 'sphere_sa', 'Ebulk', '=', 'self', '.', 'bulk_gform', '(', 'bulk_entry', ')', '*', 'w_vol', 'new_r', '=', 'r', 'new_r', '=', 'new_r', '/', '10', 'if', 'r_units', '==', '"nanometers"', 'else', 'new_r', 'e', '=', '(', 'Ebulk', '+', 'tot_wulff_se', ')', 'e', '=', 'e', '/', '1000', 'if', 'e_units', '==', '"keV"', 'else', 'e', 'e', '=', 'e', '/', '(', '(', '4', '/', '3', ')', '*', 'np', '.', 'pi', '*', 'new_r', '**', '3', ')', 'if', 'normalize', 'else', 'e', 'bulk_struct', '=', 'bulk_entry', '.', 'structure', 'density', '=', 'len', '(', 'bulk_struct', ')', '/', 'bulk_struct', '.', 'lattice', '.', 'volume', 'e', '=', 'e', '/', '(', 'density', '*', 'w_vol', ')', 'if', 'scale_per_atom', 'else', 'e', 'return', 'e', ',', 'new_r']
Calculates the formation energy of the particle with arbitrary radius r. Args: wulffshape (WulffShape): Initial, unscaled WulffShape bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk. r (float (Ang)): Arbitrary effective radius of the WulffShape from_sphere_area (bool): There are two ways to calculate the bulk formation energy. Either by treating the volume and thus surface area of the particle as a perfect sphere, or as a Wulff shape. r_units (str): Can be nanometers or Angstrom e_units (str): Can be keV or eV normalize (bool): Whether or not to normalize energy by volume scale_per_atom (True): Whether or not to normalize by number of atoms in the particle Returns: particle formation energy (float in keV), effective radius
['Calculates', 'the', 'formation', 'energy', 'of', 'the', 'particle', 'with', 'arbitrary', 'radius', 'r', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/surface_analysis.py#L1657-L1711
1,522
log2timeline/plaso
plaso/analyzers/hashers/manager.py
HashersManager.GetHasherNamesFromString
def GetHasherNamesFromString(cls, hasher_names_string): """Retrieves a list of a hasher names from a comma separated string. Takes a string of comma separated hasher names transforms it to a list of hasher names. Args: hasher_names_string (str): comma separated names of hashers to enable, the string 'all' to enable all hashers or 'none' to disable all hashers. Returns: list[str]: names of valid hashers from the string, or an empty list if no valid names are found. """ hasher_names = [] if not hasher_names_string or hasher_names_string.strip() == 'none': return hasher_names if hasher_names_string.strip() == 'all': return cls.GetHasherNames() for hasher_name in hasher_names_string.split(','): hasher_name = hasher_name.strip() if not hasher_name: continue hasher_name = hasher_name.lower() if hasher_name in cls._hasher_classes: hasher_names.append(hasher_name) return hasher_names
python
def GetHasherNamesFromString(cls, hasher_names_string): """Retrieves a list of a hasher names from a comma separated string. Takes a string of comma separated hasher names transforms it to a list of hasher names. Args: hasher_names_string (str): comma separated names of hashers to enable, the string 'all' to enable all hashers or 'none' to disable all hashers. Returns: list[str]: names of valid hashers from the string, or an empty list if no valid names are found. """ hasher_names = [] if not hasher_names_string or hasher_names_string.strip() == 'none': return hasher_names if hasher_names_string.strip() == 'all': return cls.GetHasherNames() for hasher_name in hasher_names_string.split(','): hasher_name = hasher_name.strip() if not hasher_name: continue hasher_name = hasher_name.lower() if hasher_name in cls._hasher_classes: hasher_names.append(hasher_name) return hasher_names
['def', 'GetHasherNamesFromString', '(', 'cls', ',', 'hasher_names_string', ')', ':', 'hasher_names', '=', '[', ']', 'if', 'not', 'hasher_names_string', 'or', 'hasher_names_string', '.', 'strip', '(', ')', '==', "'none'", ':', 'return', 'hasher_names', 'if', 'hasher_names_string', '.', 'strip', '(', ')', '==', "'all'", ':', 'return', 'cls', '.', 'GetHasherNames', '(', ')', 'for', 'hasher_name', 'in', 'hasher_names_string', '.', 'split', '(', "','", ')', ':', 'hasher_name', '=', 'hasher_name', '.', 'strip', '(', ')', 'if', 'not', 'hasher_name', ':', 'continue', 'hasher_name', '=', 'hasher_name', '.', 'lower', '(', ')', 'if', 'hasher_name', 'in', 'cls', '.', '_hasher_classes', ':', 'hasher_names', '.', 'append', '(', 'hasher_name', ')', 'return', 'hasher_names']
Retrieves a list of a hasher names from a comma separated string. Takes a string of comma separated hasher names transforms it to a list of hasher names. Args: hasher_names_string (str): comma separated names of hashers to enable, the string 'all' to enable all hashers or 'none' to disable all hashers. Returns: list[str]: names of valid hashers from the string, or an empty list if no valid names are found.
['Retrieves', 'a', 'list', 'of', 'a', 'hasher', 'names', 'from', 'a', 'comma', 'separated', 'string', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analyzers/hashers/manager.py#L33-L65
1,523
msiedlarek/wiring
wiring/graph.py
Graph.acquire
def acquire(self, specification, arguments=None): """ Returns an object for `specification` injecting its provider with a mix of its :term:`dependencies <dependency>` and given `arguments`. If there is a conflict between the injectable dependencies and `arguments`, the value from `arguments` is used. When one of `arguments` keys is neither an integer nor a string a `TypeError` is raised. :param specification: An object :term:`specification`. :param arguments: A dictionary of arguments given to the object :term:`provider`, overriding those that would be injected or filling in for those that wouldn't. Positional arguments should be stored under 0-based integer keys. :raises: TypeError """ if arguments is None: realized_dependencies = {} else: realized_dependencies = copy.copy(arguments) provider = self.providers[specification] scope = None if provider.scope is not None: try: scope = self.scopes[provider.scope] except KeyError: raise UnknownScopeError(provider.scope) if scope is not None and specification in scope: return scope[specification] dependencies = six.iteritems(provider.dependencies) for argument, dependency_specification in dependencies: if argument not in realized_dependencies: if isinstance(dependency_specification, Factory): realized_dependencies[argument] = self.FactoryProxy( self, dependency_specification.specification ) else: realized_dependencies[argument] = self.acquire( dependency_specification ) args = [] kwargs = {} for argument, value in six.iteritems(realized_dependencies): if isinstance(argument, six.integer_types): # Integer keys are for positional arguments. if len(args) <= argument: args.extend([None] * (argument + 1 - len(args))) args[argument] = value elif isinstance(argument, six.string_types): # String keys are for keyword arguments. kwargs[argument] = value else: raise TypeError( "{} is not a valid argument key".format(repr(argument)) ) instance = provider(*args, **kwargs) if scope is not None: scope[specification] = instance return instance
python
def acquire(self, specification, arguments=None): """ Returns an object for `specification` injecting its provider with a mix of its :term:`dependencies <dependency>` and given `arguments`. If there is a conflict between the injectable dependencies and `arguments`, the value from `arguments` is used. When one of `arguments` keys is neither an integer nor a string a `TypeError` is raised. :param specification: An object :term:`specification`. :param arguments: A dictionary of arguments given to the object :term:`provider`, overriding those that would be injected or filling in for those that wouldn't. Positional arguments should be stored under 0-based integer keys. :raises: TypeError """ if arguments is None: realized_dependencies = {} else: realized_dependencies = copy.copy(arguments) provider = self.providers[specification] scope = None if provider.scope is not None: try: scope = self.scopes[provider.scope] except KeyError: raise UnknownScopeError(provider.scope) if scope is not None and specification in scope: return scope[specification] dependencies = six.iteritems(provider.dependencies) for argument, dependency_specification in dependencies: if argument not in realized_dependencies: if isinstance(dependency_specification, Factory): realized_dependencies[argument] = self.FactoryProxy( self, dependency_specification.specification ) else: realized_dependencies[argument] = self.acquire( dependency_specification ) args = [] kwargs = {} for argument, value in six.iteritems(realized_dependencies): if isinstance(argument, six.integer_types): # Integer keys are for positional arguments. if len(args) <= argument: args.extend([None] * (argument + 1 - len(args))) args[argument] = value elif isinstance(argument, six.string_types): # String keys are for keyword arguments. kwargs[argument] = value else: raise TypeError( "{} is not a valid argument key".format(repr(argument)) ) instance = provider(*args, **kwargs) if scope is not None: scope[specification] = instance return instance
['def', 'acquire', '(', 'self', ',', 'specification', ',', 'arguments', '=', 'None', ')', ':', 'if', 'arguments', 'is', 'None', ':', 'realized_dependencies', '=', '{', '}', 'else', ':', 'realized_dependencies', '=', 'copy', '.', 'copy', '(', 'arguments', ')', 'provider', '=', 'self', '.', 'providers', '[', 'specification', ']', 'scope', '=', 'None', 'if', 'provider', '.', 'scope', 'is', 'not', 'None', ':', 'try', ':', 'scope', '=', 'self', '.', 'scopes', '[', 'provider', '.', 'scope', ']', 'except', 'KeyError', ':', 'raise', 'UnknownScopeError', '(', 'provider', '.', 'scope', ')', 'if', 'scope', 'is', 'not', 'None', 'and', 'specification', 'in', 'scope', ':', 'return', 'scope', '[', 'specification', ']', 'dependencies', '=', 'six', '.', 'iteritems', '(', 'provider', '.', 'dependencies', ')', 'for', 'argument', ',', 'dependency_specification', 'in', 'dependencies', ':', 'if', 'argument', 'not', 'in', 'realized_dependencies', ':', 'if', 'isinstance', '(', 'dependency_specification', ',', 'Factory', ')', ':', 'realized_dependencies', '[', 'argument', ']', '=', 'self', '.', 'FactoryProxy', '(', 'self', ',', 'dependency_specification', '.', 'specification', ')', 'else', ':', 'realized_dependencies', '[', 'argument', ']', '=', 'self', '.', 'acquire', '(', 'dependency_specification', ')', 'args', '=', '[', ']', 'kwargs', '=', '{', '}', 'for', 'argument', ',', 'value', 'in', 'six', '.', 'iteritems', '(', 'realized_dependencies', ')', ':', 'if', 'isinstance', '(', 'argument', ',', 'six', '.', 'integer_types', ')', ':', '# Integer keys are for positional arguments.', 'if', 'len', '(', 'args', ')', '<=', 'argument', ':', 'args', '.', 'extend', '(', '[', 'None', ']', '*', '(', 'argument', '+', '1', '-', 'len', '(', 'args', ')', ')', ')', 'args', '[', 'argument', ']', '=', 'value', 'elif', 'isinstance', '(', 'argument', ',', 'six', '.', 'string_types', ')', ':', '# String keys are for keyword arguments.', 'kwargs', '[', 'argument', ']', '=', 'value', 'else', ':', 'raise', 'TypeError', '(', '"{} is not a valid argument key"', '.', 'format', '(', 'repr', '(', 'argument', ')', ')', ')', 'instance', '=', 'provider', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'scope', 'is', 'not', 'None', ':', 'scope', '[', 'specification', ']', '=', 'instance', 'return', 'instance']
Returns an object for `specification` injecting its provider with a mix of its :term:`dependencies <dependency>` and given `arguments`. If there is a conflict between the injectable dependencies and `arguments`, the value from `arguments` is used. When one of `arguments` keys is neither an integer nor a string a `TypeError` is raised. :param specification: An object :term:`specification`. :param arguments: A dictionary of arguments given to the object :term:`provider`, overriding those that would be injected or filling in for those that wouldn't. Positional arguments should be stored under 0-based integer keys. :raises: TypeError
['Returns', 'an', 'object', 'for', 'specification', 'injecting', 'its', 'provider', 'with', 'a', 'mix', 'of', 'its', ':', 'term', ':', 'dependencies', '<dependency', '>', 'and', 'given', 'arguments', '.', 'If', 'there', 'is', 'a', 'conflict', 'between', 'the', 'injectable', 'dependencies', 'and', 'arguments', 'the', 'value', 'from', 'arguments', 'is', 'used', '.']
train
https://github.com/msiedlarek/wiring/blob/c32165b680356fe9f1e422a1d11127f867065f94/wiring/graph.py#L150-L222
1,524
fastai/fastai
fastai/tabular/data.py
tabular_learner
def tabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None, ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **learn_kwargs): "Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params." emb_szs = data.get_emb_szs(ifnone(emb_szs, {})) model = TabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop, y_range=y_range, use_bn=use_bn) return Learner(data, model, metrics=metrics, **learn_kwargs)
python
def tabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None, ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **learn_kwargs): "Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params." emb_szs = data.get_emb_szs(ifnone(emb_szs, {})) model = TabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop, y_range=y_range, use_bn=use_bn) return Learner(data, model, metrics=metrics, **learn_kwargs)
['def', 'tabular_learner', '(', 'data', ':', 'DataBunch', ',', 'layers', ':', 'Collection', '[', 'int', ']', ',', 'emb_szs', ':', 'Dict', '[', 'str', ',', 'int', ']', '=', 'None', ',', 'metrics', '=', 'None', ',', 'ps', ':', 'Collection', '[', 'float', ']', '=', 'None', ',', 'emb_drop', ':', 'float', '=', '0.', ',', 'y_range', ':', 'OptRange', '=', 'None', ',', 'use_bn', ':', 'bool', '=', 'True', ',', '*', '*', 'learn_kwargs', ')', ':', 'emb_szs', '=', 'data', '.', 'get_emb_szs', '(', 'ifnone', '(', 'emb_szs', ',', '{', '}', ')', ')', 'model', '=', 'TabularModel', '(', 'emb_szs', ',', 'len', '(', 'data', '.', 'cont_names', ')', ',', 'out_sz', '=', 'data', '.', 'c', ',', 'layers', '=', 'layers', ',', 'ps', '=', 'ps', ',', 'emb_drop', '=', 'emb_drop', ',', 'y_range', '=', 'y_range', ',', 'use_bn', '=', 'use_bn', ')', 'return', 'Learner', '(', 'data', ',', 'model', ',', 'metrics', '=', 'metrics', ',', '*', '*', 'learn_kwargs', ')']
Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params.
['Get', 'a', 'Learner', 'using', 'data', 'with', 'metrics', 'including', 'a', 'TabularModel', 'created', 'using', 'the', 'remaining', 'params', '.']
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/tabular/data.py#L170-L176
1,525
dhermes/bezier
src/bezier/_surface_helpers.py
_jacobian_det
def _jacobian_det(nodes, degree, st_vals): r"""Compute :math:`\det(D B)` at a set of values. This requires that :math:`B \in \mathbf{R}^2`. .. note:: This assumes but does not check that each ``(s, t)`` in ``st_vals`` is inside the reference triangle. .. warning:: This relies on helpers in :mod:`bezier` for computing the Jacobian of the surface. However, these helpers are not part of the public surface and may change or be removed. .. testsetup:: jacobian-det import numpy as np import bezier from bezier._surface_helpers import jacobian_det .. doctest:: jacobian-det :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 1.0, 2.0, 0.0, 1.5, 0.0], ... [0.0, 0.0, 0.0, 1.0, 1.5, 2.0], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> st_vals = np.asfortranarray([ ... [0.25, 0.0 ], ... [0.75, 0.125], ... [0.5 , 0.5 ], ... ]) >>> s_vals, t_vals = st_vals.T >>> surface.evaluate_cartesian_multi(st_vals) array([[0.5 , 1.59375, 1.25 ], [0. , 0.34375, 1.25 ]]) >>> # B(s, t) = [s(t + 2), t(s + 2)] >>> s_vals * (t_vals + 2) array([0.5 , 1.59375, 1.25 ]) >>> t_vals * (s_vals + 2) array([0. , 0.34375, 1.25 ]) >>> jacobian_det(nodes, 2, st_vals) array([4.5 , 5.75, 6. ]) >>> # det(DB) = 2(s + t + 2) >>> 2 * (s_vals + t_vals + 2) array([4.5 , 5.75, 6. ]) .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): Nodes defining a B |eacute| zier surface :math:`B(s, t)`. degree (int): The degree of the surface :math:`B`. st_vals (numpy.ndarray): ``N x 2`` array of Cartesian inputs to B |eacute| zier surfaces defined by :math:`B_s` and :math:`B_t`. Returns: numpy.ndarray: Array of all determinant values, one for each row in ``st_vals``. """ jac_nodes = jacobian_both(nodes, degree, 2) if degree == 1: num_vals, _ = st_vals.shape bs_bt_vals = np.repeat(jac_nodes, num_vals, axis=1) else: bs_bt_vals = evaluate_cartesian_multi( jac_nodes, degree - 1, st_vals, 4 ) # Take the determinant for each (s, t). return ( bs_bt_vals[0, :] * bs_bt_vals[3, :] - bs_bt_vals[1, :] * bs_bt_vals[2, :] )
python
def _jacobian_det(nodes, degree, st_vals): r"""Compute :math:`\det(D B)` at a set of values. This requires that :math:`B \in \mathbf{R}^2`. .. note:: This assumes but does not check that each ``(s, t)`` in ``st_vals`` is inside the reference triangle. .. warning:: This relies on helpers in :mod:`bezier` for computing the Jacobian of the surface. However, these helpers are not part of the public surface and may change or be removed. .. testsetup:: jacobian-det import numpy as np import bezier from bezier._surface_helpers import jacobian_det .. doctest:: jacobian-det :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 1.0, 2.0, 0.0, 1.5, 0.0], ... [0.0, 0.0, 0.0, 1.0, 1.5, 2.0], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> st_vals = np.asfortranarray([ ... [0.25, 0.0 ], ... [0.75, 0.125], ... [0.5 , 0.5 ], ... ]) >>> s_vals, t_vals = st_vals.T >>> surface.evaluate_cartesian_multi(st_vals) array([[0.5 , 1.59375, 1.25 ], [0. , 0.34375, 1.25 ]]) >>> # B(s, t) = [s(t + 2), t(s + 2)] >>> s_vals * (t_vals + 2) array([0.5 , 1.59375, 1.25 ]) >>> t_vals * (s_vals + 2) array([0. , 0.34375, 1.25 ]) >>> jacobian_det(nodes, 2, st_vals) array([4.5 , 5.75, 6. ]) >>> # det(DB) = 2(s + t + 2) >>> 2 * (s_vals + t_vals + 2) array([4.5 , 5.75, 6. ]) .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): Nodes defining a B |eacute| zier surface :math:`B(s, t)`. degree (int): The degree of the surface :math:`B`. st_vals (numpy.ndarray): ``N x 2`` array of Cartesian inputs to B |eacute| zier surfaces defined by :math:`B_s` and :math:`B_t`. Returns: numpy.ndarray: Array of all determinant values, one for each row in ``st_vals``. """ jac_nodes = jacobian_both(nodes, degree, 2) if degree == 1: num_vals, _ = st_vals.shape bs_bt_vals = np.repeat(jac_nodes, num_vals, axis=1) else: bs_bt_vals = evaluate_cartesian_multi( jac_nodes, degree - 1, st_vals, 4 ) # Take the determinant for each (s, t). return ( bs_bt_vals[0, :] * bs_bt_vals[3, :] - bs_bt_vals[1, :] * bs_bt_vals[2, :] )
['def', '_jacobian_det', '(', 'nodes', ',', 'degree', ',', 'st_vals', ')', ':', 'jac_nodes', '=', 'jacobian_both', '(', 'nodes', ',', 'degree', ',', '2', ')', 'if', 'degree', '==', '1', ':', 'num_vals', ',', '_', '=', 'st_vals', '.', 'shape', 'bs_bt_vals', '=', 'np', '.', 'repeat', '(', 'jac_nodes', ',', 'num_vals', ',', 'axis', '=', '1', ')', 'else', ':', 'bs_bt_vals', '=', 'evaluate_cartesian_multi', '(', 'jac_nodes', ',', 'degree', '-', '1', ',', 'st_vals', ',', '4', ')', '# Take the determinant for each (s, t).', 'return', '(', 'bs_bt_vals', '[', '0', ',', ':', ']', '*', 'bs_bt_vals', '[', '3', ',', ':', ']', '-', 'bs_bt_vals', '[', '1', ',', ':', ']', '*', 'bs_bt_vals', '[', '2', ',', ':', ']', ')']
r"""Compute :math:`\det(D B)` at a set of values. This requires that :math:`B \in \mathbf{R}^2`. .. note:: This assumes but does not check that each ``(s, t)`` in ``st_vals`` is inside the reference triangle. .. warning:: This relies on helpers in :mod:`bezier` for computing the Jacobian of the surface. However, these helpers are not part of the public surface and may change or be removed. .. testsetup:: jacobian-det import numpy as np import bezier from bezier._surface_helpers import jacobian_det .. doctest:: jacobian-det :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 1.0, 2.0, 0.0, 1.5, 0.0], ... [0.0, 0.0, 0.0, 1.0, 1.5, 2.0], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> st_vals = np.asfortranarray([ ... [0.25, 0.0 ], ... [0.75, 0.125], ... [0.5 , 0.5 ], ... ]) >>> s_vals, t_vals = st_vals.T >>> surface.evaluate_cartesian_multi(st_vals) array([[0.5 , 1.59375, 1.25 ], [0. , 0.34375, 1.25 ]]) >>> # B(s, t) = [s(t + 2), t(s + 2)] >>> s_vals * (t_vals + 2) array([0.5 , 1.59375, 1.25 ]) >>> t_vals * (s_vals + 2) array([0. , 0.34375, 1.25 ]) >>> jacobian_det(nodes, 2, st_vals) array([4.5 , 5.75, 6. ]) >>> # det(DB) = 2(s + t + 2) >>> 2 * (s_vals + t_vals + 2) array([4.5 , 5.75, 6. ]) .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): Nodes defining a B |eacute| zier surface :math:`B(s, t)`. degree (int): The degree of the surface :math:`B`. st_vals (numpy.ndarray): ``N x 2`` array of Cartesian inputs to B |eacute| zier surfaces defined by :math:`B_s` and :math:`B_t`. Returns: numpy.ndarray: Array of all determinant values, one for each row in ``st_vals``.
['r', 'Compute', ':', 'math', ':', '\\', 'det', '(', 'D', 'B', ')', 'at', 'a', 'set', 'of', 'values', '.']
train
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1276-L1356
1,526
bitesofcode/projexui
projexui/widgets/xcalendarwidget/xcalendarwidget.py
XCalendarWidget.dragDropFilter
def dragDropFilter( self ): """ Returns a drag and drop filter method. If set, the method should \ accept 2 arguments: a QWidget and a drag/drop event and process it. :usage |from projexui.qt.QtCore import QEvent | |class MyWidget(QWidget): | def __init__( self, parent ): | super(MyWidget, self).__init__(parent) | | self._tree = XCalendarWidget(self) | self._tree.setDragDropFilter(MyWidget.handleDragDrop) | | @staticmethod | def handleDragDrop(object, event): | if ( event.type() == QEvent.DragEnter ): | event.acceptProposedActions() | elif ( event.type() == QEvent.Drop ): | print 'dropping' :return <function> || <method> || None """ filt = None if ( self._dragDropFilterRef ): filt = self._dragDropFilterRef() if ( not filt ): self._dragDropFilterRef = None return filt
python
def dragDropFilter( self ): """ Returns a drag and drop filter method. If set, the method should \ accept 2 arguments: a QWidget and a drag/drop event and process it. :usage |from projexui.qt.QtCore import QEvent | |class MyWidget(QWidget): | def __init__( self, parent ): | super(MyWidget, self).__init__(parent) | | self._tree = XCalendarWidget(self) | self._tree.setDragDropFilter(MyWidget.handleDragDrop) | | @staticmethod | def handleDragDrop(object, event): | if ( event.type() == QEvent.DragEnter ): | event.acceptProposedActions() | elif ( event.type() == QEvent.Drop ): | print 'dropping' :return <function> || <method> || None """ filt = None if ( self._dragDropFilterRef ): filt = self._dragDropFilterRef() if ( not filt ): self._dragDropFilterRef = None return filt
['def', 'dragDropFilter', '(', 'self', ')', ':', 'filt', '=', 'None', 'if', '(', 'self', '.', '_dragDropFilterRef', ')', ':', 'filt', '=', 'self', '.', '_dragDropFilterRef', '(', ')', 'if', '(', 'not', 'filt', ')', ':', 'self', '.', '_dragDropFilterRef', '=', 'None', 'return', 'filt']
Returns a drag and drop filter method. If set, the method should \ accept 2 arguments: a QWidget and a drag/drop event and process it. :usage |from projexui.qt.QtCore import QEvent | |class MyWidget(QWidget): | def __init__( self, parent ): | super(MyWidget, self).__init__(parent) | | self._tree = XCalendarWidget(self) | self._tree.setDragDropFilter(MyWidget.handleDragDrop) | | @staticmethod | def handleDragDrop(object, event): | if ( event.type() == QEvent.DragEnter ): | event.acceptProposedActions() | elif ( event.type() == QEvent.Drop ): | print 'dropping' :return <function> || <method> || None
['Returns', 'a', 'drag', 'and', 'drop', 'filter', 'method', '.', 'If', 'set', 'the', 'method', 'should', '\\', 'accept', '2', 'arguments', ':', 'a', 'QWidget', 'and', 'a', 'drag', '/', 'drop', 'event', 'and', 'process', 'it', '.', ':', 'usage', '|from', 'projexui', '.', 'qt', '.', 'QtCore', 'import', 'QEvent', '|', '|class', 'MyWidget', '(', 'QWidget', ')', ':', '|', 'def', '__init__', '(', 'self', 'parent', ')', ':', '|', 'super', '(', 'MyWidget', 'self', ')', '.', '__init__', '(', 'parent', ')', '|', '|', 'self', '.', '_tree', '=', 'XCalendarWidget', '(', 'self', ')', '|', 'self', '.', '_tree', '.', 'setDragDropFilter', '(', 'MyWidget', '.', 'handleDragDrop', ')', '|', '|']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarwidget.py#L121-L151
1,527
linode/linode_api4-python
linode_api4/paginated_list.py
PaginatedList.make_paginated_list
def make_paginated_list(json, client, cls, parent_id=None, page_url=None, filters=None): """ Returns a PaginatedList populated with the first page of data provided, and the ability to load additional pages. This should not be called outside of the :any:`LinodeClient` class. :param json: The JSON list to use as the first page :param client: A LinodeClient to use to load additional pages :param parent_id: The parent ID for derived objects :param page_url: The URL to use when loading more pages :param cls: The class to instantiate for objects :param filters: The filters used when making the call that generated this list. If not provided, this will fail when loading additional pages. :returns: An instance of PaginatedList that will represent the entire collection whose first page is json """ l = PaginatedList.make_list(json["data"], client, cls, parent_id=parent_id) p = PaginatedList(client, page_url, page=l, max_pages=json['pages'], total_items=json['results'], parent_id=parent_id, filters=filters) return p
python
def make_paginated_list(json, client, cls, parent_id=None, page_url=None, filters=None): """ Returns a PaginatedList populated with the first page of data provided, and the ability to load additional pages. This should not be called outside of the :any:`LinodeClient` class. :param json: The JSON list to use as the first page :param client: A LinodeClient to use to load additional pages :param parent_id: The parent ID for derived objects :param page_url: The URL to use when loading more pages :param cls: The class to instantiate for objects :param filters: The filters used when making the call that generated this list. If not provided, this will fail when loading additional pages. :returns: An instance of PaginatedList that will represent the entire collection whose first page is json """ l = PaginatedList.make_list(json["data"], client, cls, parent_id=parent_id) p = PaginatedList(client, page_url, page=l, max_pages=json['pages'], total_items=json['results'], parent_id=parent_id, filters=filters) return p
['def', 'make_paginated_list', '(', 'json', ',', 'client', ',', 'cls', ',', 'parent_id', '=', 'None', ',', 'page_url', '=', 'None', ',', 'filters', '=', 'None', ')', ':', 'l', '=', 'PaginatedList', '.', 'make_list', '(', 'json', '[', '"data"', ']', ',', 'client', ',', 'cls', ',', 'parent_id', '=', 'parent_id', ')', 'p', '=', 'PaginatedList', '(', 'client', ',', 'page_url', ',', 'page', '=', 'l', ',', 'max_pages', '=', 'json', '[', "'pages'", ']', ',', 'total_items', '=', 'json', '[', "'results'", ']', ',', 'parent_id', '=', 'parent_id', ',', 'filters', '=', 'filters', ')', 'return', 'p']
Returns a PaginatedList populated with the first page of data provided, and the ability to load additional pages. This should not be called outside of the :any:`LinodeClient` class. :param json: The JSON list to use as the first page :param client: A LinodeClient to use to load additional pages :param parent_id: The parent ID for derived objects :param page_url: The URL to use when loading more pages :param cls: The class to instantiate for objects :param filters: The filters used when making the call that generated this list. If not provided, this will fail when loading additional pages. :returns: An instance of PaginatedList that will represent the entire collection whose first page is json
['Returns', 'a', 'PaginatedList', 'populated', 'with', 'the', 'first', 'page', 'of', 'data', 'provided', 'and', 'the', 'ability', 'to', 'load', 'additional', 'pages', '.', 'This', 'should', 'not', 'be', 'called', 'outside', 'of', 'the', ':', 'any', ':', 'LinodeClient', 'class', '.']
train
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/paginated_list.py#L197-L219
1,528
wearpants/instrument
instrument/__init__.py
first
def first(iterable = None, *, name = None, metric = call_default): """Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """ if iterable is None: return _first_decorator(name, metric) else: return _do_first(iterable, name, metric)
python
def first(iterable = None, *, name = None, metric = call_default): """Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """ if iterable is None: return _first_decorator(name, metric) else: return _do_first(iterable, name, metric)
['def', 'first', '(', 'iterable', '=', 'None', ',', '*', ',', 'name', '=', 'None', ',', 'metric', '=', 'call_default', ')', ':', 'if', 'iterable', 'is', 'None', ':', 'return', '_first_decorator', '(', 'name', ',', 'metric', ')', 'else', ':', 'return', '_do_first', '(', 'iterable', ',', 'name', ',', 'metric', ')']
Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric
['Measure', 'time', 'elapsed', 'to', 'produce', 'first', 'item', 'of', 'an', 'iterable']
train
https://github.com/wearpants/instrument/blob/a0f6103574ab58a82361a951e5e56b69aedfe294/instrument/__init__.py#L137-L147
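A tiny sketch of wrapping a generator with `first` as defined above; the generator and metric name are made up, and the default metric is assumed to simply report the elapsed time.

```python
from instrument import first

def numbers():
    # the time taken to reach this first yield is what gets measured
    yield 1
    yield 2

for n in first(numbers(), name="numbers"):
    print(n)
```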
1,529
saltstack/salt
salt/modules/disk.py
iostat
def iostat(interval=1, count=5, disks=None): ''' Gather and return (averaged) IO stats. .. versionadded:: 2016.3.0 .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' disk.iostat 1 5 disks=sda ''' if salt.utils.platform.is_linux(): return _iostat_linux(interval, count, disks) elif salt.utils.platform.is_freebsd(): return _iostat_fbsd(interval, count, disks) elif salt.utils.platform.is_aix(): return _iostat_aix(interval, count, disks)
python
def iostat(interval=1, count=5, disks=None): ''' Gather and return (averaged) IO stats. .. versionadded:: 2016.3.0 .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' disk.iostat 1 5 disks=sda ''' if salt.utils.platform.is_linux(): return _iostat_linux(interval, count, disks) elif salt.utils.platform.is_freebsd(): return _iostat_fbsd(interval, count, disks) elif salt.utils.platform.is_aix(): return _iostat_aix(interval, count, disks)
['def', 'iostat', '(', 'interval', '=', '1', ',', 'count', '=', '5', ',', 'disks', '=', 'None', ')', ':', 'if', 'salt', '.', 'utils', '.', 'platform', '.', 'is_linux', '(', ')', ':', 'return', '_iostat_linux', '(', 'interval', ',', 'count', ',', 'disks', ')', 'elif', 'salt', '.', 'utils', '.', 'platform', '.', 'is_freebsd', '(', ')', ':', 'return', '_iostat_fbsd', '(', 'interval', ',', 'count', ',', 'disks', ')', 'elif', 'salt', '.', 'utils', '.', 'platform', '.', 'is_aix', '(', ')', ':', 'return', '_iostat_aix', '(', 'interval', ',', 'count', ',', 'disks', ')']
Gather and return (averaged) IO stats. .. versionadded:: 2016.3.0 .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' disk.iostat 1 5 disks=sda
['Gather', 'and', 'return', '(', 'averaged', ')', 'IO', 'stats', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/disk.py#L773-L793
1,530
senaite/senaite.core
bika/lims/browser/analysisrequest/resultsinterpretation.py
ARResultsInterpretationView.handle_form_submit
def handle_form_submit(self): """Handle form submission """ protect.CheckAuthenticator(self.request) logger.info("Handle ResultsInterpration Submit") # Save the results interpretation res = self.request.form.get("ResultsInterpretationDepts", []) self.context.setResultsInterpretationDepts(res) self.add_status_message(_("Changes Saved"), level="info") # reindex the object after save to update all catalog metadata self.context.reindexObject() # notify object edited event event.notify(ObjectEditedEvent(self.context))
python
def handle_form_submit(self): """Handle form submission """ protect.CheckAuthenticator(self.request) logger.info("Handle ResultsInterpration Submit") # Save the results interpretation res = self.request.form.get("ResultsInterpretationDepts", []) self.context.setResultsInterpretationDepts(res) self.add_status_message(_("Changes Saved"), level="info") # reindex the object after save to update all catalog metadata self.context.reindexObject() # notify object edited event event.notify(ObjectEditedEvent(self.context))
['def', 'handle_form_submit', '(', 'self', ')', ':', 'protect', '.', 'CheckAuthenticator', '(', 'self', '.', 'request', ')', 'logger', '.', 'info', '(', '"Handle ResultsInterpration Submit"', ')', '# Save the results interpretation', 'res', '=', 'self', '.', 'request', '.', 'form', '.', 'get', '(', '"ResultsInterpretationDepts"', ',', '[', ']', ')', 'self', '.', 'context', '.', 'setResultsInterpretationDepts', '(', 'res', ')', 'self', '.', 'add_status_message', '(', '_', '(', '"Changes Saved"', ')', ',', 'level', '=', '"info"', ')', '# reindex the object after save to update all catalog metadata', 'self', '.', 'context', '.', 'reindexObject', '(', ')', '# notify object edited event', 'event', '.', 'notify', '(', 'ObjectEditedEvent', '(', 'self', '.', 'context', ')', ')']
Handle form submission
['Handle', 'form', 'submission']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/resultsinterpretation.py#L47-L59
1,531
ethereum/py-evm
eth/vm/opcode.py
Opcode.as_opcode
def as_opcode(cls: Type[T], logic_fn: Callable[..., Any], mnemonic: str, gas_cost: int) -> Type[T]: """ Class factory method for turning vanilla functions into Opcode classes. """ if gas_cost: @functools.wraps(logic_fn) def wrapped_logic_fn(computation: 'BaseComputation') -> Any: """ Wrapper functionf or the logic function which consumes the base opcode gas cost prior to execution. """ computation.consume_gas( gas_cost, mnemonic, ) return logic_fn(computation) else: wrapped_logic_fn = logic_fn props = { '__call__': staticmethod(wrapped_logic_fn), 'mnemonic': mnemonic, 'gas_cost': gas_cost, } opcode_cls = type("opcode:{0}".format(mnemonic), (cls,), props) return opcode_cls()
python
def as_opcode(cls: Type[T], logic_fn: Callable[..., Any], mnemonic: str, gas_cost: int) -> Type[T]: """ Class factory method for turning vanilla functions into Opcode classes. """ if gas_cost: @functools.wraps(logic_fn) def wrapped_logic_fn(computation: 'BaseComputation') -> Any: """ Wrapper functionf or the logic function which consumes the base opcode gas cost prior to execution. """ computation.consume_gas( gas_cost, mnemonic, ) return logic_fn(computation) else: wrapped_logic_fn = logic_fn props = { '__call__': staticmethod(wrapped_logic_fn), 'mnemonic': mnemonic, 'gas_cost': gas_cost, } opcode_cls = type("opcode:{0}".format(mnemonic), (cls,), props) return opcode_cls()
['def', 'as_opcode', '(', 'cls', ':', 'Type', '[', 'T', ']', ',', 'logic_fn', ':', 'Callable', '[', '...', ',', 'Any', ']', ',', 'mnemonic', ':', 'str', ',', 'gas_cost', ':', 'int', ')', '->', 'Type', '[', 'T', ']', ':', 'if', 'gas_cost', ':', '@', 'functools', '.', 'wraps', '(', 'logic_fn', ')', 'def', 'wrapped_logic_fn', '(', 'computation', ':', "'BaseComputation'", ')', '->', 'Any', ':', '"""\n Wrapper functionf or the logic function which consumes the base\n opcode gas cost prior to execution.\n """', 'computation', '.', 'consume_gas', '(', 'gas_cost', ',', 'mnemonic', ',', ')', 'return', 'logic_fn', '(', 'computation', ')', 'else', ':', 'wrapped_logic_fn', '=', 'logic_fn', 'props', '=', '{', "'__call__'", ':', 'staticmethod', '(', 'wrapped_logic_fn', ')', ',', "'mnemonic'", ':', 'mnemonic', ',', "'gas_cost'", ':', 'gas_cost', ',', '}', 'opcode_cls', '=', 'type', '(', '"opcode:{0}"', '.', 'format', '(', 'mnemonic', ')', ',', '(', 'cls', ',', ')', ',', 'props', ')', 'return', 'opcode_cls', '(', ')']
Class factory method for turning vanilla functions into Opcode classes.
['Class', 'factory', 'method', 'for', 'turning', 'vanilla', 'functions', 'into', 'Opcode', 'classes', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/opcode.py#L52-L80
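A minimal usage sketch for the record above: turning a plain logic function into an opcode with Opcode.as_opcode. The import path follows the record's file path; the NOP function and mnemonic are invented for illustration. Note that, despite the Type[T] return annotation, the method returns an instantiated, callable opcode object.

from eth.vm.opcode import Opcode

def nop_logic(computation):
    # hypothetical logic function: does nothing with the computation
    pass

# with gas_cost=0 no gas is charged before the wrapped logic runs
nop = Opcode.as_opcode(logic_fn=nop_logic, mnemonic='NOP', gas_cost=0)
# nop(computation) now simply dispatches to nop_logic(computation)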
1,532
haikuginger/beekeeper
beekeeper/api.py
API.from_hive_file
def from_hive_file(cls, fname, *args, **kwargs): """ Open a local JSON hive file and initialize from the hive contained in that file, paying attention to the version keyword argument. """ version = kwargs.pop('version', None) require = kwargs.pop('require_https', True) return cls(Hive.from_file(fname, version, require), *args, **kwargs)
python
def from_hive_file(cls, fname, *args, **kwargs): """ Open a local JSON hive file and initialize from the hive contained in that file, paying attention to the version keyword argument. """ version = kwargs.pop('version', None) require = kwargs.pop('require_https', True) return cls(Hive.from_file(fname, version, require), *args, **kwargs)
['def', 'from_hive_file', '(', 'cls', ',', 'fname', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'version', '=', 'kwargs', '.', 'pop', '(', "'version'", ',', 'None', ')', 'require', '=', 'kwargs', '.', 'pop', '(', "'require_https'", ',', 'True', ')', 'return', 'cls', '(', 'Hive', '.', 'from_file', '(', 'fname', ',', 'version', ',', 'require', ')', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Open a local JSON hive file and initialize from the hive contained in that file, paying attention to the version keyword argument.
['Open', 'a', 'local', 'JSON', 'hive', 'file', 'and', 'initialize', 'from', 'the', 'hive', 'contained', 'in', 'that', 'file', 'paying', 'attention', 'to', 'the', 'version', 'keyword', 'argument', '.']
train
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/api.py#L244-L251
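A short usage sketch for the beekeeper record above. The import path mirrors the record's file path, the hive file name is a placeholder, and it is assumed the API constructor needs no further positional arguments; version and require_https are the keywords the method itself pops.

from beekeeper.api import API

# 'my_service.json' stands in for a local JSON hive file
api = API.from_hive_file('my_service.json', version=2, require_https=True)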
1,533
emory-libraries/eulcommon
eulcommon/binfile/eudora.py
Toc.messages
def messages(self): '''a generator yielding the :class:`Message` structures in the index''' # the file contains the fixed-size file header followed by # fixed-size message structures. start after the file header and # then simply return the message structures in sequence until the # end of the file. offset = self.LENGTH while offset < len(self.mmap): yield Message(mm=self.mmap, offset=offset) offset += Message.LENGTH
python
def messages(self): '''a generator yielding the :class:`Message` structures in the index''' # the file contains the fixed-size file header followed by # fixed-size message structures. start after the file header and # then simply return the message structures in sequence until the # end of the file. offset = self.LENGTH while offset < len(self.mmap): yield Message(mm=self.mmap, offset=offset) offset += Message.LENGTH
['def', 'messages', '(', 'self', ')', ':', '# the file contains the fixed-size file header followed by', '# fixed-size message structures. start after the file header and', '# then simply return the message structures in sequence until the', '# end of the file.', 'offset', '=', 'self', '.', 'LENGTH', 'while', 'offset', '<', 'len', '(', 'self', '.', 'mmap', ')', ':', 'yield', 'Message', '(', 'mm', '=', 'self', '.', 'mmap', ',', 'offset', '=', 'offset', ')', 'offset', '+=', 'Message', '.', 'LENGTH']
a generator yielding the :class:`Message` structures in the index
['a', 'generator', 'yielding', 'the', ':', 'class', ':', 'Message', 'structures', 'in', 'the', 'index']
train
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/binfile/eudora.py#L85-L96
1,534
log2timeline/dfvfs
dfvfs/analyzer/analyzer.py
Analyzer.GetCompressedStreamTypeIndicators
def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None): """Determines if a file contains a supported compressed stream types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators. """ if (cls._compressed_stream_remainder_list is None or cls._compressed_stream_store is None): specification_store, remainder_list = cls._GetSpecificationStore( definitions.FORMAT_CATEGORY_COMPRESSED_STREAM) cls._compressed_stream_remainder_list = remainder_list cls._compressed_stream_store = specification_store if cls._compressed_stream_scanner is None: cls._compressed_stream_scanner = cls._GetSignatureScanner( cls._compressed_stream_store) return cls._GetTypeIndicators( cls._compressed_stream_scanner, cls._compressed_stream_store, cls._compressed_stream_remainder_list, path_spec, resolver_context=resolver_context)
python
def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None): """Determines if a file contains a supported compressed stream types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators. """ if (cls._compressed_stream_remainder_list is None or cls._compressed_stream_store is None): specification_store, remainder_list = cls._GetSpecificationStore( definitions.FORMAT_CATEGORY_COMPRESSED_STREAM) cls._compressed_stream_remainder_list = remainder_list cls._compressed_stream_store = specification_store if cls._compressed_stream_scanner is None: cls._compressed_stream_scanner = cls._GetSignatureScanner( cls._compressed_stream_store) return cls._GetTypeIndicators( cls._compressed_stream_scanner, cls._compressed_stream_store, cls._compressed_stream_remainder_list, path_spec, resolver_context=resolver_context)
['def', 'GetCompressedStreamTypeIndicators', '(', 'cls', ',', 'path_spec', ',', 'resolver_context', '=', 'None', ')', ':', 'if', '(', 'cls', '.', '_compressed_stream_remainder_list', 'is', 'None', 'or', 'cls', '.', '_compressed_stream_store', 'is', 'None', ')', ':', 'specification_store', ',', 'remainder_list', '=', 'cls', '.', '_GetSpecificationStore', '(', 'definitions', '.', 'FORMAT_CATEGORY_COMPRESSED_STREAM', ')', 'cls', '.', '_compressed_stream_remainder_list', '=', 'remainder_list', 'cls', '.', '_compressed_stream_store', '=', 'specification_store', 'if', 'cls', '.', '_compressed_stream_scanner', 'is', 'None', ':', 'cls', '.', '_compressed_stream_scanner', '=', 'cls', '.', '_GetSignatureScanner', '(', 'cls', '.', '_compressed_stream_store', ')', 'return', 'cls', '.', '_GetTypeIndicators', '(', 'cls', '.', '_compressed_stream_scanner', ',', 'cls', '.', '_compressed_stream_store', ',', 'cls', '.', '_compressed_stream_remainder_list', ',', 'path_spec', ',', 'resolver_context', '=', 'resolver_context', ')']
Determines if a file contains a supported compressed stream types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
['Determines', 'if', 'a', 'file', 'contains', 'a', 'supported', 'compressed', 'stream', 'types', '.']
train
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/analyzer/analyzer.py#L257-L282
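An illustrative call for the dfvfs record above, assuming the usual dfvfs path-spec factory API (Factory.NewPathSpec and TYPE_INDICATOR_OS); the file location is hypothetical.

from dfvfs.analyzer import analyzer
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory

# build an OS path spec pointing at a (hypothetical) gzip-compressed file on disk
path_spec = path_spec_factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp/sample.gz')

# returns the list of compressed-stream format indicators recognised by the signature scanner
type_indicators = analyzer.Analyzer.GetCompressedStreamTypeIndicators(path_spec)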
1,535
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/rest_api.py
_make_token_async
def _make_token_async(scopes, service_account_id): """Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch. """ rpc = app_identity.create_rpc() app_identity.make_get_access_token_call(rpc, scopes, service_account_id) token, expires_at = yield rpc raise ndb.Return((token, expires_at))
python
def _make_token_async(scopes, service_account_id): """Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch. """ rpc = app_identity.create_rpc() app_identity.make_get_access_token_call(rpc, scopes, service_account_id) token, expires_at = yield rpc raise ndb.Return((token, expires_at))
['def', '_make_token_async', '(', 'scopes', ',', 'service_account_id', ')', ':', 'rpc', '=', 'app_identity', '.', 'create_rpc', '(', ')', 'app_identity', '.', 'make_get_access_token_call', '(', 'rpc', ',', 'scopes', ',', 'service_account_id', ')', 'token', ',', 'expires_at', '=', 'yield', 'rpc', 'raise', 'ndb', '.', 'Return', '(', '(', 'token', ',', 'expires_at', ')', ')']
Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch.
['Get', 'a', 'fresh', 'authentication', 'token', '.']
train
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/rest_api.py#L42-L56
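A hedged sketch of how the coroutine above is typically driven. It assumes _make_token_async is decorated as an ndb tasklet (the yield/ndb.Return pattern implies this) and uses a standard Cloud Storage read-only scope; passing None selects the default service account.

from google.appengine.ext import ndb

@ndb.tasklet
def get_access_token():
    token, expires_at = yield _make_token_async(
        ['https://www.googleapis.com/auth/devstorage.read_only'], None)
    raise ndb.Return(token)

token = get_access_token().get_result()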
1,536
vint21h/django-opensearch
opensearch/views.py
opensearch
def opensearch(request): """ Return opensearch.xml. """ contact_email = settings.CONTACT_EMAIL short_name = settings.SHORT_NAME description = settings.DESCRIPTION favicon_width = settings.FAVICON_WIDTH favicon_height = settings.FAVICON_HEIGHT favicon_type = settings.FAVICON_TYPE favicon_file = settings.FAVICON_FILE url = "{url}?{querystring}{{searchTerms}}".format(**{ "url": request.build_absolute_uri(reverse(settings.SEARCH_URL)), "querystring": settings.SEARCH_QUERYSTRING, }) input_encoding = settings.INPUT_ENCODING.upper() return render_to_response("opensearch/opensearch.xml", context=locals(), content_type="application/opensearchdescription+xml")
python
def opensearch(request): """ Return opensearch.xml. """ contact_email = settings.CONTACT_EMAIL short_name = settings.SHORT_NAME description = settings.DESCRIPTION favicon_width = settings.FAVICON_WIDTH favicon_height = settings.FAVICON_HEIGHT favicon_type = settings.FAVICON_TYPE favicon_file = settings.FAVICON_FILE url = "{url}?{querystring}{{searchTerms}}".format(**{ "url": request.build_absolute_uri(reverse(settings.SEARCH_URL)), "querystring": settings.SEARCH_QUERYSTRING, }) input_encoding = settings.INPUT_ENCODING.upper() return render_to_response("opensearch/opensearch.xml", context=locals(), content_type="application/opensearchdescription+xml")
['def', 'opensearch', '(', 'request', ')', ':', 'contact_email', '=', 'settings', '.', 'CONTACT_EMAIL', 'short_name', '=', 'settings', '.', 'SHORT_NAME', 'description', '=', 'settings', '.', 'DESCRIPTION', 'favicon_width', '=', 'settings', '.', 'FAVICON_WIDTH', 'favicon_height', '=', 'settings', '.', 'FAVICON_HEIGHT', 'favicon_type', '=', 'settings', '.', 'FAVICON_TYPE', 'favicon_file', '=', 'settings', '.', 'FAVICON_FILE', 'url', '=', '"{url}?{querystring}{{searchTerms}}"', '.', 'format', '(', '*', '*', '{', '"url"', ':', 'request', '.', 'build_absolute_uri', '(', 'reverse', '(', 'settings', '.', 'SEARCH_URL', ')', ')', ',', '"querystring"', ':', 'settings', '.', 'SEARCH_QUERYSTRING', ',', '}', ')', 'input_encoding', '=', 'settings', '.', 'INPUT_ENCODING', '.', 'upper', '(', ')', 'return', 'render_to_response', '(', '"opensearch/opensearch.xml"', ',', 'context', '=', 'locals', '(', ')', ',', 'content_type', '=', '"application/opensearchdescription+xml"', ')']
Return opensearch.xml.
['Return', 'opensearch', '.', 'xml', '.']
train
https://github.com/vint21h/django-opensearch/blob/4da3fa80b36f29e4ce350b3d689cda577b35421d/opensearch/views.py#L23-L41
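A small URLconf sketch showing how the view in this record would typically be exposed; the pattern and route name are illustrative, and the old-style django.conf.urls.url import matches the Django era that render_to_response implies.

from django.conf.urls import url
from opensearch.views import opensearch

urlpatterns = [
    url(r'^opensearch\.xml$', opensearch, name='opensearch'),
]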
1,537
helixyte/everest
everest/representers/traversal.py
ResourceDataVisitor.visit_member
def visit_member(self, attribute_key, attribute, member_node, member_data, is_link_node, parent_data, index=None): """ Visits a member node in a resource data tree. :param tuple attribute_key: tuple containing the attribute tokens identifying the member node's position in the resource data tree. :param attribute: mapped attribute holding information about the member node's name (in the parent) and type etc. :type attribute: :class:`everest.representers.attributes.MappedAttribute` :param member_node: the node holding resource data. This is either a resource instance (when using a :class:`ResourceTreeTraverser` on a tree of resources) or a data element instance (when using a :class:`DataElementTreeTraverser` on a data element tree. :param dict member_data: dictionary holding all member data extracted during traversal (with mapped attributes as keys). :param bool is_link_node: indicates if the given member node is a link. :param dict parent_data: dictionary holding all parent data extracted during traversal (with mapped attributes as keys). :param int index: this indicates a member node's index in a collection parent node. If the parent node is a member node, it will be `None`. """ raise NotImplementedError('Abstract method.')
python
def visit_member(self, attribute_key, attribute, member_node, member_data, is_link_node, parent_data, index=None): """ Visits a member node in a resource data tree. :param tuple attribute_key: tuple containing the attribute tokens identifying the member node's position in the resource data tree. :param attribute: mapped attribute holding information about the member node's name (in the parent) and type etc. :type attribute: :class:`everest.representers.attributes.MappedAttribute` :param member_node: the node holding resource data. This is either a resource instance (when using a :class:`ResourceTreeTraverser` on a tree of resources) or a data element instance (when using a :class:`DataElementTreeTraverser` on a data element tree. :param dict member_data: dictionary holding all member data extracted during traversal (with mapped attributes as keys). :param bool is_link_node: indicates if the given member node is a link. :param dict parent_data: dictionary holding all parent data extracted during traversal (with mapped attributes as keys). :param int index: this indicates a member node's index in a collection parent node. If the parent node is a member node, it will be `None`. """ raise NotImplementedError('Abstract method.')
['def', 'visit_member', '(', 'self', ',', 'attribute_key', ',', 'attribute', ',', 'member_node', ',', 'member_data', ',', 'is_link_node', ',', 'parent_data', ',', 'index', '=', 'None', ')', ':', 'raise', 'NotImplementedError', '(', "'Abstract method.'", ')']
Visits a member node in a resource data tree. :param tuple attribute_key: tuple containing the attribute tokens identifying the member node's position in the resource data tree. :param attribute: mapped attribute holding information about the member node's name (in the parent) and type etc. :type attribute: :class:`everest.representers.attributes.MappedAttribute` :param member_node: the node holding resource data. This is either a resource instance (when using a :class:`ResourceTreeTraverser` on a tree of resources) or a data element instance (when using a :class:`DataElementTreeTraverser` on a data element tree. :param dict member_data: dictionary holding all member data extracted during traversal (with mapped attributes as keys). :param bool is_link_node: indicates if the given member node is a link. :param dict parent_data: dictionary holding all parent data extracted during traversal (with mapped attributes as keys). :param int index: this indicates a member node's index in a collection parent node. If the parent node is a member node, it will be `None`.
['Visits', 'a', 'member', 'node', 'in', 'a', 'resource', 'data', 'tree', '.']
train
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/traversal.py#L53-L76
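Because visit_member is abstract, a minimal illustrative subclass (names invented, base-class constructor assumed argument-free) makes the expected override signature concrete.

class MemberCounter(ResourceDataVisitor):
    # toy visitor that just counts the member nodes seen during traversal
    def __init__(self):
        self.count = 0

    def visit_member(self, attribute_key, attribute, member_node, member_data,
                     is_link_node, parent_data, index=None):
        self.count += 1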
1,538
angr/angr
angr/knowledge_plugins/functions/function.py
Function._get_initial_name
def _get_initial_name(self): """ Determine the most suitable name of the function. :return: The initial function name. :rtype: string """ name = None addr = self.addr # Try to get a name from existing labels if self._function_manager is not None: if addr in self._function_manager._kb.labels: name = self._function_manager._kb.labels[addr] # try to get the name from a hook if name is None and self.project is not None: project = self.project if project.is_hooked(addr): hooker = project.hooked_by(addr) name = hooker.display_name elif project.simos.is_syscall_addr(addr): syscall_inst = project.simos.syscall_from_addr(addr) name = syscall_inst.display_name # generate an IDA-style sub_X name if name is None: name = 'sub_%x' % addr return name
python
def _get_initial_name(self): """ Determine the most suitable name of the function. :return: The initial function name. :rtype: string """ name = None addr = self.addr # Try to get a name from existing labels if self._function_manager is not None: if addr in self._function_manager._kb.labels: name = self._function_manager._kb.labels[addr] # try to get the name from a hook if name is None and self.project is not None: project = self.project if project.is_hooked(addr): hooker = project.hooked_by(addr) name = hooker.display_name elif project.simos.is_syscall_addr(addr): syscall_inst = project.simos.syscall_from_addr(addr) name = syscall_inst.display_name # generate an IDA-style sub_X name if name is None: name = 'sub_%x' % addr return name
['def', '_get_initial_name', '(', 'self', ')', ':', 'name', '=', 'None', 'addr', '=', 'self', '.', 'addr', '# Try to get a name from existing labels', 'if', 'self', '.', '_function_manager', 'is', 'not', 'None', ':', 'if', 'addr', 'in', 'self', '.', '_function_manager', '.', '_kb', '.', 'labels', ':', 'name', '=', 'self', '.', '_function_manager', '.', '_kb', '.', 'labels', '[', 'addr', ']', '# try to get the name from a hook', 'if', 'name', 'is', 'None', 'and', 'self', '.', 'project', 'is', 'not', 'None', ':', 'project', '=', 'self', '.', 'project', 'if', 'project', '.', 'is_hooked', '(', 'addr', ')', ':', 'hooker', '=', 'project', '.', 'hooked_by', '(', 'addr', ')', 'name', '=', 'hooker', '.', 'display_name', 'elif', 'project', '.', 'simos', '.', 'is_syscall_addr', '(', 'addr', ')', ':', 'syscall_inst', '=', 'project', '.', 'simos', '.', 'syscall_from_addr', '(', 'addr', ')', 'name', '=', 'syscall_inst', '.', 'display_name', '# generate an IDA-style sub_X name', 'if', 'name', 'is', 'None', ':', 'name', '=', "'sub_%x'", '%', 'addr', 'return', 'name']
Determine the most suitable name of the function. :return: The initial function name. :rtype: string
['Determine', 'the', 'most', 'suitable', 'name', 'of', 'the', 'function', '.']
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/knowledge_plugins/functions/function.py#L687-L717
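The fallback branch above produces IDA-style names; a quick illustration of the formatting it relies on (addresses invented):

>>> 'sub_%x' % 0x401000
'sub_401000'
>>> 'sub_%x' % 0x10abc
'sub_10abc'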
1,539
tanghaibao/jcvi
jcvi/apps/cap3.py
prepare
def prepare(args): """ %prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly. """ from operator import itemgetter from jcvi.formats.fasta import Fasta, SeqIO p = OptionParser(prepare.__doc__) p.add_option("--rearray_lib", default=None, help="name of the rearrayed library [default: %default]") p.add_option("--orig_lib_file", help="fasta file containing reads from the original libraries [default: %default]") g = OptionGroup(p, "Optional parameters") g.add_option("--output_folder", default="to_assemble", help="output folder to write the FASTA files to [default: %default]") p.add_option_group(g) opts, args = p.parse_args(args) if not opts.rearray_lib or not opts.orig_lib_file: logging.error("Please specify the required parameters") sys.exit(not p.print_help()) rearraylib, origlibfile = opts.rearray_lib, opts.orig_lib_file if not op.isfile(origlibfile): logging.error("Original library reads file `{0}` does not exist!".format(origlibfile)) sys.exit() lookuptblfile = rearraylib + '.lookup' logging.debug(lookuptblfile) if not op.isfile(lookuptblfile): logging.error("Lookup table file `{0}` does not exist!".format(lookuptblfile)) sys.exit() rearraylibfile = rearraylib + '.fasta' logging.debug(rearraylibfile) if not op.isfile(rearraylibfile): logging.error("Rearrayed library reads file `{0}` does not exist!".format(rearraylibfile)) sys.exit() origlibFasta = Fasta(origlibfile) rearraylibFasta = Fasta(rearraylibfile) origlibids = [o for o in origlibFasta.iterkeys_ordered()] rearraylibids = [r for r in rearraylibFasta.iterkeys_ordered()] if not op.isdir(opts.output_folder): logging.warning("Output directory `{0}` missing. Creating it now...".format(opts.output_folder)) os.makedirs(opts.output_folder) logfile = rearraylib + '.log' log = open(logfile, 'w') fp = open(lookuptblfile, 'r') for row in fp: origprefix, rearrayprefix = itemgetter(0,3)(row.split('\t')) libpair = origprefix + '_' + rearrayprefix outfile = opts.output_folder + '/' + libpair + '.fasta' ofp = open(outfile, 'w') for o in origlibids: if re.match(origprefix, o): SeqIO.write(origlibFasta[o], ofp, 'fasta') for r in rearraylibids: if re.match(rearrayprefix, r): SeqIO.write(rearraylibFasta[r], ofp, 'fasta') ofp.close() print(outfile, file=log) log.close() logging.debug('Wrote log file `{0}`'.format(logfile))
python
def prepare(args): """ %prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly. """ from operator import itemgetter from jcvi.formats.fasta import Fasta, SeqIO p = OptionParser(prepare.__doc__) p.add_option("--rearray_lib", default=None, help="name of the rearrayed library [default: %default]") p.add_option("--orig_lib_file", help="fasta file containing reads from the original libraries [default: %default]") g = OptionGroup(p, "Optional parameters") g.add_option("--output_folder", default="to_assemble", help="output folder to write the FASTA files to [default: %default]") p.add_option_group(g) opts, args = p.parse_args(args) if not opts.rearray_lib or not opts.orig_lib_file: logging.error("Please specify the required parameters") sys.exit(not p.print_help()) rearraylib, origlibfile = opts.rearray_lib, opts.orig_lib_file if not op.isfile(origlibfile): logging.error("Original library reads file `{0}` does not exist!".format(origlibfile)) sys.exit() lookuptblfile = rearraylib + '.lookup' logging.debug(lookuptblfile) if not op.isfile(lookuptblfile): logging.error("Lookup table file `{0}` does not exist!".format(lookuptblfile)) sys.exit() rearraylibfile = rearraylib + '.fasta' logging.debug(rearraylibfile) if not op.isfile(rearraylibfile): logging.error("Rearrayed library reads file `{0}` does not exist!".format(rearraylibfile)) sys.exit() origlibFasta = Fasta(origlibfile) rearraylibFasta = Fasta(rearraylibfile) origlibids = [o for o in origlibFasta.iterkeys_ordered()] rearraylibids = [r for r in rearraylibFasta.iterkeys_ordered()] if not op.isdir(opts.output_folder): logging.warning("Output directory `{0}` missing. Creating it now...".format(opts.output_folder)) os.makedirs(opts.output_folder) logfile = rearraylib + '.log' log = open(logfile, 'w') fp = open(lookuptblfile, 'r') for row in fp: origprefix, rearrayprefix = itemgetter(0,3)(row.split('\t')) libpair = origprefix + '_' + rearrayprefix outfile = opts.output_folder + '/' + libpair + '.fasta' ofp = open(outfile, 'w') for o in origlibids: if re.match(origprefix, o): SeqIO.write(origlibFasta[o], ofp, 'fasta') for r in rearraylibids: if re.match(rearrayprefix, r): SeqIO.write(rearraylibFasta[r], ofp, 'fasta') ofp.close() print(outfile, file=log) log.close() logging.debug('Wrote log file `{0}`'.format(logfile))
['def', 'prepare', '(', 'args', ')', ':', 'from', 'operator', 'import', 'itemgetter', 'from', 'jcvi', '.', 'formats', '.', 'fasta', 'import', 'Fasta', ',', 'SeqIO', 'p', '=', 'OptionParser', '(', 'prepare', '.', '__doc__', ')', 'p', '.', 'add_option', '(', '"--rearray_lib"', ',', 'default', '=', 'None', ',', 'help', '=', '"name of the rearrayed library [default: %default]"', ')', 'p', '.', 'add_option', '(', '"--orig_lib_file"', ',', 'help', '=', '"fasta file containing reads from the original libraries [default: %default]"', ')', 'g', '=', 'OptionGroup', '(', 'p', ',', '"Optional parameters"', ')', 'g', '.', 'add_option', '(', '"--output_folder"', ',', 'default', '=', '"to_assemble"', ',', 'help', '=', '"output folder to write the FASTA files to [default: %default]"', ')', 'p', '.', 'add_option_group', '(', 'g', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'not', 'opts', '.', 'rearray_lib', 'or', 'not', 'opts', '.', 'orig_lib_file', ':', 'logging', '.', 'error', '(', '"Please specify the required parameters"', ')', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'rearraylib', ',', 'origlibfile', '=', 'opts', '.', 'rearray_lib', ',', 'opts', '.', 'orig_lib_file', 'if', 'not', 'op', '.', 'isfile', '(', 'origlibfile', ')', ':', 'logging', '.', 'error', '(', '"Original library reads file `{0}` does not exist!"', '.', 'format', '(', 'origlibfile', ')', ')', 'sys', '.', 'exit', '(', ')', 'lookuptblfile', '=', 'rearraylib', '+', "'.lookup'", 'logging', '.', 'debug', '(', 'lookuptblfile', ')', 'if', 'not', 'op', '.', 'isfile', '(', 'lookuptblfile', ')', ':', 'logging', '.', 'error', '(', '"Lookup table file `{0}` does not exist!"', '.', 'format', '(', 'lookuptblfile', ')', ')', 'sys', '.', 'exit', '(', ')', 'rearraylibfile', '=', 'rearraylib', '+', "'.fasta'", 'logging', '.', 'debug', '(', 'rearraylibfile', ')', 'if', 'not', 'op', '.', 'isfile', '(', 'rearraylibfile', ')', ':', 'logging', '.', 'error', '(', '"Rearrayed library reads file `{0}` does not exist!"', '.', 'format', '(', 'rearraylibfile', ')', ')', 'sys', '.', 'exit', '(', ')', 'origlibFasta', '=', 'Fasta', '(', 'origlibfile', ')', 'rearraylibFasta', '=', 'Fasta', '(', 'rearraylibfile', ')', 'origlibids', '=', '[', 'o', 'for', 'o', 'in', 'origlibFasta', '.', 'iterkeys_ordered', '(', ')', ']', 'rearraylibids', '=', '[', 'r', 'for', 'r', 'in', 'rearraylibFasta', '.', 'iterkeys_ordered', '(', ')', ']', 'if', 'not', 'op', '.', 'isdir', '(', 'opts', '.', 'output_folder', ')', ':', 'logging', '.', 'warning', '(', '"Output directory `{0}` missing. 
Creating it now..."', '.', 'format', '(', 'opts', '.', 'output_folder', ')', ')', 'os', '.', 'makedirs', '(', 'opts', '.', 'output_folder', ')', 'logfile', '=', 'rearraylib', '+', "'.log'", 'log', '=', 'open', '(', 'logfile', ',', "'w'", ')', 'fp', '=', 'open', '(', 'lookuptblfile', ',', "'r'", ')', 'for', 'row', 'in', 'fp', ':', 'origprefix', ',', 'rearrayprefix', '=', 'itemgetter', '(', '0', ',', '3', ')', '(', 'row', '.', 'split', '(', "'\\t'", ')', ')', 'libpair', '=', 'origprefix', '+', "'_'", '+', 'rearrayprefix', 'outfile', '=', 'opts', '.', 'output_folder', '+', "'/'", '+', 'libpair', '+', "'.fasta'", 'ofp', '=', 'open', '(', 'outfile', ',', "'w'", ')', 'for', 'o', 'in', 'origlibids', ':', 'if', 're', '.', 'match', '(', 'origprefix', ',', 'o', ')', ':', 'SeqIO', '.', 'write', '(', 'origlibFasta', '[', 'o', ']', ',', 'ofp', ',', "'fasta'", ')', 'for', 'r', 'in', 'rearraylibids', ':', 'if', 're', '.', 'match', '(', 'rearrayprefix', ',', 'r', ')', ':', 'SeqIO', '.', 'write', '(', 'rearraylibFasta', '[', 'r', ']', ',', 'ofp', ',', "'fasta'", ')', 'ofp', '.', 'close', '(', ')', 'print', '(', 'outfile', ',', 'file', '=', 'log', ')', 'log', '.', 'close', '(', ')', 'logging', '.', 'debug', '(', "'Wrote log file `{0}`'", '.', 'format', '(', 'logfile', ')', ')']
%prog prepare --rearray_lib=<rearraylibrary> --orig_lib_file=<origlibfile> Inferred file names --------------------------------------------- `lookuptblfile` : rearraylibrary.lookup `rearraylibfile`: rearraylibrary.fasta Pick sequences from the original library file and the rearrayed library file based on the mapping information provided in the `lookuptblfile`. # lookuptblfile format: column number (index) # 1 (0) 2 (1) 3 (2) 4 (3) 5 (4) 6 (5) # source_clone source_plate source_well dest_clone dest_plate dest_well The 1st and 4th column in the `lookuptblfile` form the pair of clones which constitute the elements used for the per-clone assembly.
['%prog', 'prepare', '--', 'rearray_lib', '=', '<rearraylibrary', '>', '--', 'orig_lib_file', '=', '<origlibfile', '>']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/cap3.py#L32-L121
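The docstring gives the command-line form; called from Python, the same options go in as an argv-style list. The library and file names below are placeholders, and <rearray_lib>.lookup plus <rearray_lib>.fasta must already exist alongside them.

prepare([
    "--rearray_lib=RearrayLib01",
    "--orig_lib_file=original_reads.fasta",
    "--output_folder=to_assemble",
])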
1,540
DistrictDataLabs/yellowbrick
yellowbrick/utils/helpers.py
prop_to_size
def prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False): """ Converts an array of property values (e.g. a metric or score) to values that are more useful for marker sizes, line widths, or other visual sizes. The new sizes are computed as: y = mi + (ma -mi)(\frac{x_i - min(x){max(x) - min(x)})^{power} If ``log=True``, the natural logarithm of the property values is used instead. Parameters ---------- prop : array-like, 1D An array of values of the property to scale between the size range. mi : float, default: 0.0 The size to assign the smallest property (minimum size value). ma : float, default: 5.0 The size to assign the largest property (maximum size value). power : float, default: 0.5 Used to control how rapidly the size increases from smallest to largest. log : bool, default: False Use the natural logarithm to compute the property sizes Returns ------- sizes : array, 1D The new size values, in the same shape as the input prop array """ # ensure that prop is an array vals = np.asarray(vals) # apply natural log if specified if log: vals = np.log(vals) # avoid division by zero error delta = vals.max() - vals.min() if delta == 0.0: delta = 1.0 return mi + (ma-mi) * ((vals -vals.min()) / delta) ** power
python
def prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False): """ Converts an array of property values (e.g. a metric or score) to values that are more useful for marker sizes, line widths, or other visual sizes. The new sizes are computed as: y = mi + (ma -mi)(\frac{x_i - min(x){max(x) - min(x)})^{power} If ``log=True``, the natural logarithm of the property values is used instead. Parameters ---------- prop : array-like, 1D An array of values of the property to scale between the size range. mi : float, default: 0.0 The size to assign the smallest property (minimum size value). ma : float, default: 5.0 The size to assign the largest property (maximum size value). power : float, default: 0.5 Used to control how rapidly the size increases from smallest to largest. log : bool, default: False Use the natural logarithm to compute the property sizes Returns ------- sizes : array, 1D The new size values, in the same shape as the input prop array """ # ensure that prop is an array vals = np.asarray(vals) # apply natural log if specified if log: vals = np.log(vals) # avoid division by zero error delta = vals.max() - vals.min() if delta == 0.0: delta = 1.0 return mi + (ma-mi) * ((vals -vals.min()) / delta) ** power
['def', 'prop_to_size', '(', 'vals', ',', 'mi', '=', '0.0', ',', 'ma', '=', '5.0', ',', 'power', '=', '0.5', ',', 'log', '=', 'False', ')', ':', '# ensure that prop is an array', 'vals', '=', 'np', '.', 'asarray', '(', 'vals', ')', '# apply natural log if specified', 'if', 'log', ':', 'vals', '=', 'np', '.', 'log', '(', 'vals', ')', '# avoid division by zero error', 'delta', '=', 'vals', '.', 'max', '(', ')', '-', 'vals', '.', 'min', '(', ')', 'if', 'delta', '==', '0.0', ':', 'delta', '=', '1.0', 'return', 'mi', '+', '(', 'ma', '-', 'mi', ')', '*', '(', '(', 'vals', '-', 'vals', '.', 'min', '(', ')', ')', '/', 'delta', ')', '**', 'power']
Converts an array of property values (e.g. a metric or score) to values that are more useful for marker sizes, line widths, or other visual sizes. The new sizes are computed as: y = mi + (ma -mi)(\frac{x_i - min(x){max(x) - min(x)})^{power} If ``log=True``, the natural logarithm of the property values is used instead. Parameters ---------- prop : array-like, 1D An array of values of the property to scale between the size range. mi : float, default: 0.0 The size to assign the smallest property (minimum size value). ma : float, default: 5.0 The size to assign the largest property (maximum size value). power : float, default: 0.5 Used to control how rapidly the size increases from smallest to largest. log : bool, default: False Use the natural logarithm to compute the property sizes Returns ------- sizes : array, 1D The new size values, in the same shape as the input prop array
['Converts', 'an', 'array', 'of', 'property', 'values', '(', 'e', '.', 'g', '.', 'a', 'metric', 'or', 'score', ')', 'to', 'values', 'that', 'are', 'more', 'useful', 'for', 'marker', 'sizes', 'line', 'widths', 'or', 'other', 'visual', 'sizes', '.', 'The', 'new', 'sizes', 'are', 'computed', 'as', ':']
train
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/helpers.py#L135-L179
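A worked example for the record above; the import path is taken from the record's file path and the score values are made up.

import numpy as np
from yellowbrick.utils.helpers import prop_to_size

scores = np.array([0.1, 0.4, 0.9, 1.6])
sizes = prop_to_size(scores, mi=1.0, ma=10.0, power=0.5)
# the smallest score maps to 1.0 and the largest to 10.0;
# intermediate values grow with the square root of their normalised score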
1,541
ivanlei/threatbutt
threatbutt/threatbutt.py
ThreatButt.bespoke_md5
def bespoke_md5(self, md5): """Performs Bespoke MD5 lookup on an MD5. Args: md5 - A hash. """ r = requests.post('http://threatbutt.io/api/md5/{0}'.format(md5)) self._output(r.text)
python
def bespoke_md5(self, md5): """Performs Bespoke MD5 lookup on an MD5. Args: md5 - A hash. """ r = requests.post('http://threatbutt.io/api/md5/{0}'.format(md5)) self._output(r.text)
['def', 'bespoke_md5', '(', 'self', ',', 'md5', ')', ':', 'r', '=', 'requests', '.', 'post', '(', "'http://threatbutt.io/api/md5/{0}'", '.', 'format', '(', 'md5', ')', ')', 'self', '.', '_output', '(', 'r', '.', 'text', ')']
Performs Bespoke MD5 lookup on an MD5. Args: md5 - A hash.
['Performs', 'Bespoke', 'MD5', 'lookup', 'on', 'an', 'MD5', '.']
train
https://github.com/ivanlei/threatbutt/blob/faff507a4bebfa585d3044427111418c257c34ec/threatbutt/threatbutt.py#L29-L36
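A usage sketch, assuming the class can be constructed without arguments (its __init__ is not shown in this record); the hash below is the MD5 of the empty string.

from threatbutt.threatbutt import ThreatButt

tb = ThreatButt()
tb.bespoke_md5('d41d8cd98f00b204e9800998ecf8427e')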
1,542
uyar/pygenstub
pygenstub.py
main
def main(argv=None): """Start the command line interface.""" parser = ArgumentParser(prog="pygenstub") parser.add_argument("--version", action="version", version="%(prog)s " + __version__) parser.add_argument("files", nargs="*", help="generate stubs for given files") parser.add_argument( "-m", "--module", action="append", metavar="MODULE", dest="modules", default=[], help="generate stubs for given modules", ) parser.add_argument( "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory" ) parser.add_argument( "--generic", action="store_true", default=False, help="generate generic stubs" ) parser.add_argument("--debug", action="store_true", help="enable debug messages") argv = argv if argv is not None else sys.argv arguments = parser.parse_args(argv[1:]) # set debug mode if arguments.debug: logging.basicConfig(level=logging.DEBUG) _logger.debug("running in debug mode") out_dir = arguments.out_dir if arguments.out_dir is not None else "" if (out_dir == "") and (len(arguments.modules) > 0): print("Output directory must be given when generating stubs for modules.") sys.exit(1) modules = [] for path in arguments.files: paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)] for source in paths: if str(source).startswith(os.path.pardir): source = source.absolute().resolve() if (out_dir != "") and source.is_absolute(): source = source.relative_to(source.root) destination = Path(out_dir, source.with_suffix(".pyi")) modules.append((source, destination)) for mod_name in arguments.modules: modules.extend(get_pkg_paths(mod_name, out_dir)) for source, destination in modules: _logger.info("generating stub for %s to path %s", source, destination) with source.open() as f: code = f.read() try: stub = get_stub(code, generic=arguments.generic) except Exception as e: print(source, "-", e, file=sys.stderr) continue if stub != "": if not destination.parent.exists(): destination.parent.mkdir(parents=True) with destination.open("w") as f: f.write("# " + EDIT_WARNING + "\n\n" + stub)
python
def main(argv=None): """Start the command line interface.""" parser = ArgumentParser(prog="pygenstub") parser.add_argument("--version", action="version", version="%(prog)s " + __version__) parser.add_argument("files", nargs="*", help="generate stubs for given files") parser.add_argument( "-m", "--module", action="append", metavar="MODULE", dest="modules", default=[], help="generate stubs for given modules", ) parser.add_argument( "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory" ) parser.add_argument( "--generic", action="store_true", default=False, help="generate generic stubs" ) parser.add_argument("--debug", action="store_true", help="enable debug messages") argv = argv if argv is not None else sys.argv arguments = parser.parse_args(argv[1:]) # set debug mode if arguments.debug: logging.basicConfig(level=logging.DEBUG) _logger.debug("running in debug mode") out_dir = arguments.out_dir if arguments.out_dir is not None else "" if (out_dir == "") and (len(arguments.modules) > 0): print("Output directory must be given when generating stubs for modules.") sys.exit(1) modules = [] for path in arguments.files: paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)] for source in paths: if str(source).startswith(os.path.pardir): source = source.absolute().resolve() if (out_dir != "") and source.is_absolute(): source = source.relative_to(source.root) destination = Path(out_dir, source.with_suffix(".pyi")) modules.append((source, destination)) for mod_name in arguments.modules: modules.extend(get_pkg_paths(mod_name, out_dir)) for source, destination in modules: _logger.info("generating stub for %s to path %s", source, destination) with source.open() as f: code = f.read() try: stub = get_stub(code, generic=arguments.generic) except Exception as e: print(source, "-", e, file=sys.stderr) continue if stub != "": if not destination.parent.exists(): destination.parent.mkdir(parents=True) with destination.open("w") as f: f.write("# " + EDIT_WARNING + "\n\n" + stub)
['def', 'main', '(', 'argv', '=', 'None', ')', ':', 'parser', '=', 'ArgumentParser', '(', 'prog', '=', '"pygenstub"', ')', 'parser', '.', 'add_argument', '(', '"--version"', ',', 'action', '=', '"version"', ',', 'version', '=', '"%(prog)s "', '+', '__version__', ')', 'parser', '.', 'add_argument', '(', '"files"', ',', 'nargs', '=', '"*"', ',', 'help', '=', '"generate stubs for given files"', ')', 'parser', '.', 'add_argument', '(', '"-m"', ',', '"--module"', ',', 'action', '=', '"append"', ',', 'metavar', '=', '"MODULE"', ',', 'dest', '=', '"modules"', ',', 'default', '=', '[', ']', ',', 'help', '=', '"generate stubs for given modules"', ',', ')', 'parser', '.', 'add_argument', '(', '"-o"', ',', '"--output"', ',', 'metavar', '=', '"PATH"', ',', 'dest', '=', '"out_dir"', ',', 'help', '=', '"change the output directory"', ')', 'parser', '.', 'add_argument', '(', '"--generic"', ',', 'action', '=', '"store_true"', ',', 'default', '=', 'False', ',', 'help', '=', '"generate generic stubs"', ')', 'parser', '.', 'add_argument', '(', '"--debug"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"enable debug messages"', ')', 'argv', '=', 'argv', 'if', 'argv', 'is', 'not', 'None', 'else', 'sys', '.', 'argv', 'arguments', '=', 'parser', '.', 'parse_args', '(', 'argv', '[', '1', ':', ']', ')', '# set debug mode', 'if', 'arguments', '.', 'debug', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'DEBUG', ')', '_logger', '.', 'debug', '(', '"running in debug mode"', ')', 'out_dir', '=', 'arguments', '.', 'out_dir', 'if', 'arguments', '.', 'out_dir', 'is', 'not', 'None', 'else', '""', 'if', '(', 'out_dir', '==', '""', ')', 'and', '(', 'len', '(', 'arguments', '.', 'modules', ')', '>', '0', ')', ':', 'print', '(', '"Output directory must be given when generating stubs for modules."', ')', 'sys', '.', 'exit', '(', '1', ')', 'modules', '=', '[', ']', 'for', 'path', 'in', 'arguments', '.', 'files', ':', 'paths', '=', 'Path', '(', 'path', ')', '.', 'glob', '(', '"**/*.py"', ')', 'if', 'Path', '(', 'path', ')', '.', 'is_dir', '(', ')', 'else', '[', 'Path', '(', 'path', ')', ']', 'for', 'source', 'in', 'paths', ':', 'if', 'str', '(', 'source', ')', '.', 'startswith', '(', 'os', '.', 'path', '.', 'pardir', ')', ':', 'source', '=', 'source', '.', 'absolute', '(', ')', '.', 'resolve', '(', ')', 'if', '(', 'out_dir', '!=', '""', ')', 'and', 'source', '.', 'is_absolute', '(', ')', ':', 'source', '=', 'source', '.', 'relative_to', '(', 'source', '.', 'root', ')', 'destination', '=', 'Path', '(', 'out_dir', ',', 'source', '.', 'with_suffix', '(', '".pyi"', ')', ')', 'modules', '.', 'append', '(', '(', 'source', ',', 'destination', ')', ')', 'for', 'mod_name', 'in', 'arguments', '.', 'modules', ':', 'modules', '.', 'extend', '(', 'get_pkg_paths', '(', 'mod_name', ',', 'out_dir', ')', ')', 'for', 'source', ',', 'destination', 'in', 'modules', ':', '_logger', '.', 'info', '(', '"generating stub for %s to path %s"', ',', 'source', ',', 'destination', ')', 'with', 'source', '.', 'open', '(', ')', 'as', 'f', ':', 'code', '=', 'f', '.', 'read', '(', ')', 'try', ':', 'stub', '=', 'get_stub', '(', 'code', ',', 'generic', '=', 'arguments', '.', 'generic', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', 'source', ',', '"-"', ',', 'e', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'continue', 'if', 'stub', '!=', '""', ':', 'if', 'not', 'destination', '.', 'parent', '.', 'exists', '(', ')', ':', 'destination', '.', 'parent', '.', 'mkdir', '(', 'parents', '=', 'True', ')', 'with', 'destination', 
'.', 'open', '(', '"w"', ')', 'as', 'f', ':', 'f', '.', 'write', '(', '"# "', '+', 'EDIT_WARNING', '+', '"\\n\\n"', '+', 'stub', ')']
Start the command line interface.
['Start', 'the', 'command', 'line', 'interface', '.']
train
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L888-L951
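Because main() slices argv[1:], a programmatic call needs a dummy program name in position 0; the output directory and module path below are placeholders.

main(["pygenstub", "--output", "stubs", "mypackage/mymodule.py"])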
1,543
NVIDIA/pynvrtc
pynvrtc/interface.py
NVRTCInterface.nvrtcGetLoweredName
def nvrtcGetLoweredName(self, prog, name_expression): """ Notes the given name expression denoting a __global__ function or function template instantiation. """ lowered_name = c_char_p() code = self._lib.nvrtcGetLoweredName(prog, c_char_p(encode_str(name_expression)), byref(lowered_name)) self._throw_on_error(code) return lowered_name.value.decode('utf-8')
python
def nvrtcGetLoweredName(self, prog, name_expression): """ Notes the given name expression denoting a __global__ function or function template instantiation. """ lowered_name = c_char_p() code = self._lib.nvrtcGetLoweredName(prog, c_char_p(encode_str(name_expression)), byref(lowered_name)) self._throw_on_error(code) return lowered_name.value.decode('utf-8')
['def', 'nvrtcGetLoweredName', '(', 'self', ',', 'prog', ',', 'name_expression', ')', ':', 'lowered_name', '=', 'c_char_p', '(', ')', 'code', '=', 'self', '.', '_lib', '.', 'nvrtcGetLoweredName', '(', 'prog', ',', 'c_char_p', '(', 'encode_str', '(', 'name_expression', ')', ')', ',', 'byref', '(', 'lowered_name', ')', ')', 'self', '.', '_throw_on_error', '(', 'code', ')', 'return', 'lowered_name', '.', 'value', '.', 'decode', '(', "'utf-8'", ')']
Notes the given name expression denoting a __global__ function or function template instantiation.
['Notes', 'the', 'given', 'name', 'expression', 'denoting', 'a', '__global__', 'function', 'or', 'function', 'template', 'instantiation', '.']
train
https://github.com/NVIDIA/pynvrtc/blob/fffa9f6f4a7ee1d452346cbdf68b84b5246ccffb/pynvrtc/interface.py#L265-L275
1,544
benfred/implicit
implicit/als.py
least_squares
def least_squares(Cui, X, Y, regularization, num_threads=0): """ For each user in Cui, calculate factors Xu for them using least squares on Y. Note: this is at least 10 times slower than the cython version included here. """ users, n_factors = X.shape YtY = Y.T.dot(Y) for u in range(users): X[u] = user_factor(Y, YtY, Cui, u, regularization, n_factors)
python
def least_squares(Cui, X, Y, regularization, num_threads=0): """ For each user in Cui, calculate factors Xu for them using least squares on Y. Note: this is at least 10 times slower than the cython version included here. """ users, n_factors = X.shape YtY = Y.T.dot(Y) for u in range(users): X[u] = user_factor(Y, YtY, Cui, u, regularization, n_factors)
['def', 'least_squares', '(', 'Cui', ',', 'X', ',', 'Y', ',', 'regularization', ',', 'num_threads', '=', '0', ')', ':', 'users', ',', 'n_factors', '=', 'X', '.', 'shape', 'YtY', '=', 'Y', '.', 'T', '.', 'dot', '(', 'Y', ')', 'for', 'u', 'in', 'range', '(', 'users', ')', ':', 'X', '[', 'u', ']', '=', 'user_factor', '(', 'Y', ',', 'YtY', ',', 'Cui', ',', 'u', ',', 'regularization', ',', 'n_factors', ')']
For each user in Cui, calculate factors Xu for them using least squares on Y. Note: this is at least 10 times slower than the cython version included here.
['For', 'each', 'user', 'in', 'Cui', 'calculate', 'factors', 'Xu', 'for', 'them', 'using', 'least', 'squares', 'on', 'Y', '.']
train
https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/implicit/als.py#L311-L322
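A tiny self-contained sketch for the record above. The confidence matrix and factor sizes are invented, and it assumes Cui is passed as a SciPy CSR matrix as elsewhere in the module; X is updated in place while Y is held fixed.

import numpy as np
from scipy.sparse import csr_matrix

rng = np.random.RandomState(0)
Cui = csr_matrix(np.array([[1.0, 0.0, 2.0],
                           [0.0, 3.0, 1.0]]))   # 2 users x 3 items
factors = 2
X = rng.rand(2, factors) * 0.01                 # user factors, solved for
Y = rng.rand(3, factors) * 0.01                 # item factors, held fixed
least_squares(Cui, X, Y, regularization=0.01)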
1,545
apache/incubator-mxnet
python/mxnet/name.py
NameManager.get
def get(self, name, hint): """Get the canonical name for a symbol. This is the default implementation. If the user specifies a name, the user-specified name will be used. When user does not specify a name, we automatically generate a name based on the hint string. Parameters ---------- name : str or None The name specified by the user. hint : str A hint string, which can be used to generate name. Returns ------- full_name : str A canonical name for the symbol. """ if name: return name if hint not in self._counter: self._counter[hint] = 0 name = '%s%d' % (hint, self._counter[hint]) self._counter[hint] += 1 return name
python
def get(self, name, hint): """Get the canonical name for a symbol. This is the default implementation. If the user specifies a name, the user-specified name will be used. When user does not specify a name, we automatically generate a name based on the hint string. Parameters ---------- name : str or None The name specified by the user. hint : str A hint string, which can be used to generate name. Returns ------- full_name : str A canonical name for the symbol. """ if name: return name if hint not in self._counter: self._counter[hint] = 0 name = '%s%d' % (hint, self._counter[hint]) self._counter[hint] += 1 return name
['def', 'get', '(', 'self', ',', 'name', ',', 'hint', ')', ':', 'if', 'name', ':', 'return', 'name', 'if', 'hint', 'not', 'in', 'self', '.', '_counter', ':', 'self', '.', '_counter', '[', 'hint', ']', '=', '0', 'name', '=', "'%s%d'", '%', '(', 'hint', ',', 'self', '.', '_counter', '[', 'hint', ']', ')', 'self', '.', '_counter', '[', 'hint', ']', '+=', '1', 'return', 'name']
Get the canonical name for a symbol. This is the default implementation. If the user specifies a name, the user-specified name will be used. When user does not specify a name, we automatically generate a name based on the hint string. Parameters ---------- name : str or None The name specified by the user. hint : str A hint string, which can be used to generate name. Returns ------- full_name : str A canonical name for the symbol.
['Get', 'the', 'canonical', 'name', 'for', 'a', 'symbol', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/name.py#L36-L65
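A quick illustration of the naming behaviour described above, assuming the default no-argument constructor:

nm = NameManager()
nm.get(None, 'conv')        # -> 'conv0'
nm.get(None, 'conv')        # -> 'conv1'
nm.get('my_conv', 'conv')   # -> 'my_conv'; explicit names are returned unchanged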
1,546
hobson/pug-dj
pug/dj/crawlnmine/management/__init__.py
ManagementUtility.execute
def execute(self): """ Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. """ try: subcommand = self.argv[1] except IndexError: subcommand = 'help' # Display help if no arguments were given. # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False) parser.add_argument('--settings') parser.add_argument('--pythonpath') parser.add_argument('args', nargs='*') # catch-all try: options, args = parser.parse_known_args(self.argv[2:]) handle_default_options(options) except CommandError: pass # Ignore any option errors at this point. no_settings_commands = [ 'help', 'version', '--help', '--version', '-h', 'compilemessages', 'makemessages', 'startapp', 'startproject', ] try: settings.INSTALLED_APPS except ImproperlyConfigured as exc: self.settings_exception = exc # A handful of built-in management commands work without settings. # Load the default settings -- where INSTALLED_APPS is empty. if subcommand in no_settings_commands: settings.configure() if settings.configured: django.setup() self.autocomplete() if subcommand == 'help': if '--commands' in args: sys.stdout.write(self.main_help_text(commands_only=True) + '\n') elif len(options.args) < 1: sys.stdout.write(self.main_help_text() + '\n') else: self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0]) # Special cases for 'jira.py --version' and 'jira.py --help' to work. elif subcommand == 'version' or self.argv[1:] == ['--version']: sys.stdout.write(django.get_version() + '\n') elif self.argv[1:] in (['--help'], ['-h']): sys.stdout.write(self.main_help_text() + '\n') else: self.fetch_command(subcommand).run_from_argv(self.argv)
python
def execute(self): """ Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. """ try: subcommand = self.argv[1] except IndexError: subcommand = 'help' # Display help if no arguments were given. # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False) parser.add_argument('--settings') parser.add_argument('--pythonpath') parser.add_argument('args', nargs='*') # catch-all try: options, args = parser.parse_known_args(self.argv[2:]) handle_default_options(options) except CommandError: pass # Ignore any option errors at this point. no_settings_commands = [ 'help', 'version', '--help', '--version', '-h', 'compilemessages', 'makemessages', 'startapp', 'startproject', ] try: settings.INSTALLED_APPS except ImproperlyConfigured as exc: self.settings_exception = exc # A handful of built-in management commands work without settings. # Load the default settings -- where INSTALLED_APPS is empty. if subcommand in no_settings_commands: settings.configure() if settings.configured: django.setup() self.autocomplete() if subcommand == 'help': if '--commands' in args: sys.stdout.write(self.main_help_text(commands_only=True) + '\n') elif len(options.args) < 1: sys.stdout.write(self.main_help_text() + '\n') else: self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0]) # Special cases for 'jira.py --version' and 'jira.py --help' to work. elif subcommand == 'version' or self.argv[1:] == ['--version']: sys.stdout.write(django.get_version() + '\n') elif self.argv[1:] in (['--help'], ['-h']): sys.stdout.write(self.main_help_text() + '\n') else: self.fetch_command(subcommand).run_from_argv(self.argv)
['def', 'execute', '(', 'self', ')', ':', 'try', ':', 'subcommand', '=', 'self', '.', 'argv', '[', '1', ']', 'except', 'IndexError', ':', 'subcommand', '=', "'help'", '# Display help if no arguments were given.', '# Preprocess options to extract --settings and --pythonpath.', '# These options could affect the commands that are available, so they', '# must be processed early.', 'parser', '=', 'CommandParser', '(', 'None', ',', 'usage', '=', '"%(prog)s subcommand [options] [args]"', ',', 'add_help', '=', 'False', ')', 'parser', '.', 'add_argument', '(', "'--settings'", ')', 'parser', '.', 'add_argument', '(', "'--pythonpath'", ')', 'parser', '.', 'add_argument', '(', "'args'", ',', 'nargs', '=', "'*'", ')', '# catch-all', 'try', ':', 'options', ',', 'args', '=', 'parser', '.', 'parse_known_args', '(', 'self', '.', 'argv', '[', '2', ':', ']', ')', 'handle_default_options', '(', 'options', ')', 'except', 'CommandError', ':', 'pass', '# Ignore any option errors at this point.', 'no_settings_commands', '=', '[', "'help'", ',', "'version'", ',', "'--help'", ',', "'--version'", ',', "'-h'", ',', "'compilemessages'", ',', "'makemessages'", ',', "'startapp'", ',', "'startproject'", ',', ']', 'try', ':', 'settings', '.', 'INSTALLED_APPS', 'except', 'ImproperlyConfigured', 'as', 'exc', ':', 'self', '.', 'settings_exception', '=', 'exc', '# A handful of built-in management commands work without settings.', '# Load the default settings -- where INSTALLED_APPS is empty.', 'if', 'subcommand', 'in', 'no_settings_commands', ':', 'settings', '.', 'configure', '(', ')', 'if', 'settings', '.', 'configured', ':', 'django', '.', 'setup', '(', ')', 'self', '.', 'autocomplete', '(', ')', 'if', 'subcommand', '==', "'help'", ':', 'if', "'--commands'", 'in', 'args', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'self', '.', 'main_help_text', '(', 'commands_only', '=', 'True', ')', '+', "'\\n'", ')', 'elif', 'len', '(', 'options', '.', 'args', ')', '<', '1', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'self', '.', 'main_help_text', '(', ')', '+', "'\\n'", ')', 'else', ':', 'self', '.', 'fetch_command', '(', 'options', '.', 'args', '[', '0', ']', ')', '.', 'print_help', '(', 'self', '.', 'prog_name', ',', 'options', '.', 'args', '[', '0', ']', ')', "# Special cases for 'jira.py --version' and 'jira.py --help' to work.", 'elif', 'subcommand', '==', "'version'", 'or', 'self', '.', 'argv', '[', '1', ':', ']', '==', '[', "'--version'", ']', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'django', '.', 'get_version', '(', ')', '+', "'\\n'", ')', 'elif', 'self', '.', 'argv', '[', '1', ':', ']', 'in', '(', '[', "'--help'", ']', ',', '[', "'-h'", ']', ')', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'self', '.', 'main_help_text', '(', ')', '+', "'\\n'", ')', 'else', ':', 'self', '.', 'fetch_command', '(', 'subcommand', ')', '.', 'run_from_argv', '(', 'self', '.', 'argv', ')']
Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it.
['Given', 'the', 'command', '-', 'line', 'arguments', 'this', 'figures', 'out', 'which', 'subcommand', 'is', 'being', 'run', 'creates', 'a', 'parser', 'appropriate', 'to', 'that', 'command', 'and', 'runs', 'it', '.']
train
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/management/__init__.py#L269-L325
1,547
econ-ark/HARK
HARK/utilities.py
approxLognormal
def approxLognormal(N, mu=0.0, sigma=1.0, tail_N=0, tail_bound=[0.02,0.98], tail_order=np.e): ''' Construct a discrete approximation to a lognormal distribution with underlying normal distribution N(mu,sigma). Makes an equiprobable distribution by default, but user can optionally request augmented tails with exponentially sized point masses. This can improve solution accuracy in some models. Parameters ---------- N: int Number of discrete points in the "main part" of the approximation. mu: float Mean of underlying normal distribution. sigma: float Standard deviation of underlying normal distribution. tail_N: int Number of points in each "tail part" of the approximation; 0 = no tail. tail_bound: [float] CDF boundaries of the tails vs main portion; tail_bound[0] is the lower tail bound, tail_bound[1] is the upper tail bound. Inoperative when tail_N = 0. Can make "one tailed" approximations with 0.0 or 1.0. tail_order: float Factor by which consecutive point masses in a "tail part" differ in probability. Should be >= 1 for sensible spacing. Returns ------- pmf: np.ndarray Probabilities for discrete probability mass function. X: np.ndarray Discrete values in probability mass function. Written by Luca Gerotto Based on Matab function "setup_workspace.m," from Chris Carroll's [Solution Methods for Microeconomic Dynamic Optimization Problems] (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit. Latest update: 11 February 2017 by Matthew N. White ''' # Find the CDF boundaries of each segment if sigma > 0.0: if tail_N > 0: lo_cut = tail_bound[0] hi_cut = tail_bound[1] else: lo_cut = 0.0 hi_cut = 1.0 inner_size = hi_cut - lo_cut inner_CDF_vals = [lo_cut + x*N**(-1.0)*inner_size for x in range(1, N)] if inner_size < 1.0: scale = 1.0/tail_order mag = (1.0-scale**tail_N)/(1.0-scale) lower_CDF_vals = [0.0] if lo_cut > 0.0: for x in range(tail_N-1,-1,-1): lower_CDF_vals.append(lower_CDF_vals[-1] + lo_cut*scale**x/mag) upper_CDF_vals = [hi_cut] if hi_cut < 1.0: for x in range(tail_N): upper_CDF_vals.append(upper_CDF_vals[-1] + (1.0-hi_cut)*scale**x/mag) CDF_vals = lower_CDF_vals + inner_CDF_vals + upper_CDF_vals temp_cutoffs = list(stats.lognorm.ppf(CDF_vals[1:-1], s=sigma, loc=0, scale=np.exp(mu))) cutoffs = [0] + temp_cutoffs + [np.inf] CDF_vals = np.array(CDF_vals) # Construct the discrete approximation by finding the average value within each segment. # This codeblock ignores warnings because it throws a "divide by zero encountered in log" # warning due to computing erf(infty) at the tail boundary. This is irrelevant and # apparently freaks new users out. with warnings.catch_warnings(): warnings.simplefilter("ignore") K = CDF_vals.size-1 # number of points in approximation pmf = CDF_vals[1:(K+1)] - CDF_vals[0:K] X = np.zeros(K) for i in range(K): zBot = cutoffs[i] zTop = cutoffs[i+1] tempBot = (mu+sigma**2-np.log(zBot))/(np.sqrt(2)*sigma) tempTop = (mu+sigma**2-np.log(zTop))/(np.sqrt(2)*sigma) if tempBot <= 4: X[i] = -0.5*np.exp(mu+(sigma**2)*0.5)*(erf(tempTop) - erf(tempBot))/pmf[i] else: X[i] = -0.5*np.exp(mu+(sigma**2)*0.5)*(erfc(tempBot) - erfc(tempTop))/pmf[i] else: pmf = np.ones(N)/N X = np.exp(mu)*np.ones(N) return [pmf, X]
python
def approxLognormal(N, mu=0.0, sigma=1.0, tail_N=0, tail_bound=[0.02,0.98], tail_order=np.e): ''' Construct a discrete approximation to a lognormal distribution with underlying normal distribution N(mu,sigma). Makes an equiprobable distribution by default, but user can optionally request augmented tails with exponentially sized point masses. This can improve solution accuracy in some models. Parameters ---------- N: int Number of discrete points in the "main part" of the approximation. mu: float Mean of underlying normal distribution. sigma: float Standard deviation of underlying normal distribution. tail_N: int Number of points in each "tail part" of the approximation; 0 = no tail. tail_bound: [float] CDF boundaries of the tails vs main portion; tail_bound[0] is the lower tail bound, tail_bound[1] is the upper tail bound. Inoperative when tail_N = 0. Can make "one tailed" approximations with 0.0 or 1.0. tail_order: float Factor by which consecutive point masses in a "tail part" differ in probability. Should be >= 1 for sensible spacing. Returns ------- pmf: np.ndarray Probabilities for discrete probability mass function. X: np.ndarray Discrete values in probability mass function. Written by Luca Gerotto Based on Matab function "setup_workspace.m," from Chris Carroll's [Solution Methods for Microeconomic Dynamic Optimization Problems] (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit. Latest update: 11 February 2017 by Matthew N. White ''' # Find the CDF boundaries of each segment if sigma > 0.0: if tail_N > 0: lo_cut = tail_bound[0] hi_cut = tail_bound[1] else: lo_cut = 0.0 hi_cut = 1.0 inner_size = hi_cut - lo_cut inner_CDF_vals = [lo_cut + x*N**(-1.0)*inner_size for x in range(1, N)] if inner_size < 1.0: scale = 1.0/tail_order mag = (1.0-scale**tail_N)/(1.0-scale) lower_CDF_vals = [0.0] if lo_cut > 0.0: for x in range(tail_N-1,-1,-1): lower_CDF_vals.append(lower_CDF_vals[-1] + lo_cut*scale**x/mag) upper_CDF_vals = [hi_cut] if hi_cut < 1.0: for x in range(tail_N): upper_CDF_vals.append(upper_CDF_vals[-1] + (1.0-hi_cut)*scale**x/mag) CDF_vals = lower_CDF_vals + inner_CDF_vals + upper_CDF_vals temp_cutoffs = list(stats.lognorm.ppf(CDF_vals[1:-1], s=sigma, loc=0, scale=np.exp(mu))) cutoffs = [0] + temp_cutoffs + [np.inf] CDF_vals = np.array(CDF_vals) # Construct the discrete approximation by finding the average value within each segment. # This codeblock ignores warnings because it throws a "divide by zero encountered in log" # warning due to computing erf(infty) at the tail boundary. This is irrelevant and # apparently freaks new users out. with warnings.catch_warnings(): warnings.simplefilter("ignore") K = CDF_vals.size-1 # number of points in approximation pmf = CDF_vals[1:(K+1)] - CDF_vals[0:K] X = np.zeros(K) for i in range(K): zBot = cutoffs[i] zTop = cutoffs[i+1] tempBot = (mu+sigma**2-np.log(zBot))/(np.sqrt(2)*sigma) tempTop = (mu+sigma**2-np.log(zTop))/(np.sqrt(2)*sigma) if tempBot <= 4: X[i] = -0.5*np.exp(mu+(sigma**2)*0.5)*(erf(tempTop) - erf(tempBot))/pmf[i] else: X[i] = -0.5*np.exp(mu+(sigma**2)*0.5)*(erfc(tempBot) - erfc(tempTop))/pmf[i] else: pmf = np.ones(N)/N X = np.exp(mu)*np.ones(N) return [pmf, X]
['def', 'approxLognormal', '(', 'N', ',', 'mu', '=', '0.0', ',', 'sigma', '=', '1.0', ',', 'tail_N', '=', '0', ',', 'tail_bound', '=', '[', '0.02', ',', '0.98', ']', ',', 'tail_order', '=', 'np', '.', 'e', ')', ':', '# Find the CDF boundaries of each segment', 'if', 'sigma', '>', '0.0', ':', 'if', 'tail_N', '>', '0', ':', 'lo_cut', '=', 'tail_bound', '[', '0', ']', 'hi_cut', '=', 'tail_bound', '[', '1', ']', 'else', ':', 'lo_cut', '=', '0.0', 'hi_cut', '=', '1.0', 'inner_size', '=', 'hi_cut', '-', 'lo_cut', 'inner_CDF_vals', '=', '[', 'lo_cut', '+', 'x', '*', 'N', '**', '(', '-', '1.0', ')', '*', 'inner_size', 'for', 'x', 'in', 'range', '(', '1', ',', 'N', ')', ']', 'if', 'inner_size', '<', '1.0', ':', 'scale', '=', '1.0', '/', 'tail_order', 'mag', '=', '(', '1.0', '-', 'scale', '**', 'tail_N', ')', '/', '(', '1.0', '-', 'scale', ')', 'lower_CDF_vals', '=', '[', '0.0', ']', 'if', 'lo_cut', '>', '0.0', ':', 'for', 'x', 'in', 'range', '(', 'tail_N', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'lower_CDF_vals', '.', 'append', '(', 'lower_CDF_vals', '[', '-', '1', ']', '+', 'lo_cut', '*', 'scale', '**', 'x', '/', 'mag', ')', 'upper_CDF_vals', '=', '[', 'hi_cut', ']', 'if', 'hi_cut', '<', '1.0', ':', 'for', 'x', 'in', 'range', '(', 'tail_N', ')', ':', 'upper_CDF_vals', '.', 'append', '(', 'upper_CDF_vals', '[', '-', '1', ']', '+', '(', '1.0', '-', 'hi_cut', ')', '*', 'scale', '**', 'x', '/', 'mag', ')', 'CDF_vals', '=', 'lower_CDF_vals', '+', 'inner_CDF_vals', '+', 'upper_CDF_vals', 'temp_cutoffs', '=', 'list', '(', 'stats', '.', 'lognorm', '.', 'ppf', '(', 'CDF_vals', '[', '1', ':', '-', '1', ']', ',', 's', '=', 'sigma', ',', 'loc', '=', '0', ',', 'scale', '=', 'np', '.', 'exp', '(', 'mu', ')', ')', ')', 'cutoffs', '=', '[', '0', ']', '+', 'temp_cutoffs', '+', '[', 'np', '.', 'inf', ']', 'CDF_vals', '=', 'np', '.', 'array', '(', 'CDF_vals', ')', '# Construct the discrete approximation by finding the average value within each segment.', '# This codeblock ignores warnings because it throws a "divide by zero encountered in log"', '# warning due to computing erf(infty) at the tail boundary. 
This is irrelevant and', '# apparently freaks new users out.', 'with', 'warnings', '.', 'catch_warnings', '(', ')', ':', 'warnings', '.', 'simplefilter', '(', '"ignore"', ')', 'K', '=', 'CDF_vals', '.', 'size', '-', '1', '# number of points in approximation', 'pmf', '=', 'CDF_vals', '[', '1', ':', '(', 'K', '+', '1', ')', ']', '-', 'CDF_vals', '[', '0', ':', 'K', ']', 'X', '=', 'np', '.', 'zeros', '(', 'K', ')', 'for', 'i', 'in', 'range', '(', 'K', ')', ':', 'zBot', '=', 'cutoffs', '[', 'i', ']', 'zTop', '=', 'cutoffs', '[', 'i', '+', '1', ']', 'tempBot', '=', '(', 'mu', '+', 'sigma', '**', '2', '-', 'np', '.', 'log', '(', 'zBot', ')', ')', '/', '(', 'np', '.', 'sqrt', '(', '2', ')', '*', 'sigma', ')', 'tempTop', '=', '(', 'mu', '+', 'sigma', '**', '2', '-', 'np', '.', 'log', '(', 'zTop', ')', ')', '/', '(', 'np', '.', 'sqrt', '(', '2', ')', '*', 'sigma', ')', 'if', 'tempBot', '<=', '4', ':', 'X', '[', 'i', ']', '=', '-', '0.5', '*', 'np', '.', 'exp', '(', 'mu', '+', '(', 'sigma', '**', '2', ')', '*', '0.5', ')', '*', '(', 'erf', '(', 'tempTop', ')', '-', 'erf', '(', 'tempBot', ')', ')', '/', 'pmf', '[', 'i', ']', 'else', ':', 'X', '[', 'i', ']', '=', '-', '0.5', '*', 'np', '.', 'exp', '(', 'mu', '+', '(', 'sigma', '**', '2', ')', '*', '0.5', ')', '*', '(', 'erfc', '(', 'tempBot', ')', '-', 'erfc', '(', 'tempTop', ')', ')', '/', 'pmf', '[', 'i', ']', 'else', ':', 'pmf', '=', 'np', '.', 'ones', '(', 'N', ')', '/', 'N', 'X', '=', 'np', '.', 'exp', '(', 'mu', ')', '*', 'np', '.', 'ones', '(', 'N', ')', 'return', '[', 'pmf', ',', 'X', ']']
Construct a discrete approximation to a lognormal distribution with underlying normal distribution N(mu,sigma). Makes an equiprobable distribution by default, but user can optionally request augmented tails with exponentially sized point masses. This can improve solution accuracy in some models. Parameters ---------- N: int Number of discrete points in the "main part" of the approximation. mu: float Mean of underlying normal distribution. sigma: float Standard deviation of underlying normal distribution. tail_N: int Number of points in each "tail part" of the approximation; 0 = no tail. tail_bound: [float] CDF boundaries of the tails vs main portion; tail_bound[0] is the lower tail bound, tail_bound[1] is the upper tail bound. Inoperative when tail_N = 0. Can make "one tailed" approximations with 0.0 or 1.0. tail_order: float Factor by which consecutive point masses in a "tail part" differ in probability. Should be >= 1 for sensible spacing. Returns ------- pmf: np.ndarray Probabilities for discrete probability mass function. X: np.ndarray Discrete values in probability mass function. Written by Luca Gerotto Based on Matab function "setup_workspace.m," from Chris Carroll's [Solution Methods for Microeconomic Dynamic Optimization Problems] (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit. Latest update: 11 February 2017 by Matthew N. White
['Construct', 'a', 'discrete', 'approximation', 'to', 'a', 'lognormal', 'distribution', 'with', 'underlying', 'normal', 'distribution', 'N', '(', 'mu', 'sigma', ')', '.', 'Makes', 'an', 'equiprobable', 'distribution', 'by', 'default', 'but', 'user', 'can', 'optionally', 'request', 'augmented', 'tails', 'with', 'exponentially', 'sized', 'point', 'masses', '.', 'This', 'can', 'improve', 'solution', 'accuracy', 'in', 'some', 'models', '.']
train
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/utilities.py#L435-L521
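The approxLognormal record above builds an equiprobable discrete approximation to a lognormal distribution. The following is a minimal sketch of the same idea using scipy, evaluating the lognormal quantile function at the midpoints of N equal-probability bins instead of the segment-mean integration HARK performs; the helper name and the midpoint shortcut are illustrative assumptions, not part of the dataset record.

import numpy as np
from scipy import stats

def equiprobable_lognormal(N, mu=0.0, sigma=1.0):
    # Midpoints of N equal-probability bins on the CDF scale.
    cdf_midpoints = (np.arange(N) + 0.5) / N
    # Invert the lognormal CDF; scale=exp(mu) corresponds to an underlying
    # normal distribution N(mu, sigma), matching the record above.
    X = stats.lognorm.ppf(cdf_midpoints, s=sigma, loc=0, scale=np.exp(mu))
    pmf = np.ones(N) / N
    return pmf, X

pmf, X = equiprobable_lognormal(5, mu=0.0, sigma=0.1)
print(pmf, X)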
1,548
jalanb/pysyte
pysyte/paths.py
cd
def cd(path_to): # pylint: disable=invalid-name """cd to the given path If the path is a file, then cd to its parent directory Remember current directory before the cd so that we can cd back there with cd('-') """ if path_to == '-': if not cd.previous: raise PathError('No previous directory to return to') return cd(cd.previous) if not hasattr(path_to, 'cd'): path_to = makepath(path_to) try: previous = os.getcwd() except OSError as e: if 'No such file or directory' in str(e): return False raise if path_to.isdir(): os.chdir(path_to) elif path_to.isfile(): os.chdir(path_to.parent) elif not os.path.exists(path_to): return False else: raise PathError('Cannot cd to %s' % path_to) cd.previous = previous return True
python
def cd(path_to): # pylint: disable=invalid-name """cd to the given path If the path is a file, then cd to its parent directory Remember current directory before the cd so that we can cd back there with cd('-') """ if path_to == '-': if not cd.previous: raise PathError('No previous directory to return to') return cd(cd.previous) if not hasattr(path_to, 'cd'): path_to = makepath(path_to) try: previous = os.getcwd() except OSError as e: if 'No such file or directory' in str(e): return False raise if path_to.isdir(): os.chdir(path_to) elif path_to.isfile(): os.chdir(path_to.parent) elif not os.path.exists(path_to): return False else: raise PathError('Cannot cd to %s' % path_to) cd.previous = previous return True
['def', 'cd', '(', 'path_to', ')', ':', '# pylint: disable=invalid-name', 'if', 'path_to', '==', "'-'", ':', 'if', 'not', 'cd', '.', 'previous', ':', 'raise', 'PathError', '(', "'No previous directory to return to'", ')', 'return', 'cd', '(', 'cd', '.', 'previous', ')', 'if', 'not', 'hasattr', '(', 'path_to', ',', "'cd'", ')', ':', 'path_to', '=', 'makepath', '(', 'path_to', ')', 'try', ':', 'previous', '=', 'os', '.', 'getcwd', '(', ')', 'except', 'OSError', 'as', 'e', ':', 'if', "'No such file or directory'", 'in', 'str', '(', 'e', ')', ':', 'return', 'False', 'raise', 'if', 'path_to', '.', 'isdir', '(', ')', ':', 'os', '.', 'chdir', '(', 'path_to', ')', 'elif', 'path_to', '.', 'isfile', '(', ')', ':', 'os', '.', 'chdir', '(', 'path_to', '.', 'parent', ')', 'elif', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path_to', ')', ':', 'return', 'False', 'else', ':', 'raise', 'PathError', '(', "'Cannot cd to %s'", '%', 'path_to', ')', 'cd', '.', 'previous', '=', 'previous', 'return', 'True']
cd to the given path If the path is a file, then cd to its parent directory Remember current directory before the cd so that we can cd back there with cd('-')
['cd', 'to', 'the', 'given', 'path']
train
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/paths.py#L622-L651
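A stripped-down, standard-library-only sketch of the same "cd with a '-' shortcut" behaviour documented above; it drops the path.py-style objects and the PathError class, so the error type and the function attribute used to remember the previous directory are illustrative choices.

import os

def cd(path_to):
    # 'cd -' returns to the directory recorded by the previous call.
    if path_to == '-':
        if cd.previous is None:
            raise ValueError('No previous directory to return to')
        return cd(cd.previous)
    previous = os.getcwd()
    if os.path.isdir(path_to):
        os.chdir(path_to)
    elif os.path.isfile(path_to):
        # A file argument means: cd to its parent directory.
        os.chdir(os.path.dirname(os.path.abspath(path_to)))
    else:
        return False
    cd.previous = previous
    return True

cd.previous = None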
1,549
taskcluster/taskcluster-client.py
taskcluster/queueevents.py
QueueEvents.taskException
def taskException(self, *args, **kwargs): """ Task Exception Messages Whenever Taskcluster fails to run a message is posted to this exchange. This happens if the task isn't completed before its `deadlìne`, all retries failed (i.e. workers stopped responding), the task was canceled by another entity, or the task carried a malformed payload. The specific _reason_ is evident from that task status structure, refer to the `reasonResolved` property for the last run. This exchange outputs: ``v1/task-exception-message.json#``This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * taskId: `taskId` for the task this message concerns (required) * runId: `runId` of latest run for the task, `_` if no run is exists for the task. * workerGroup: `workerGroup` of latest run for the task, `_` if no run is exists for the task. * workerId: `workerId` of latest run for the task, `_` if no run is exists for the task. * provisionerId: `provisionerId` this task is targeted at. (required) * workerType: `workerType` this task must run on. (required) * schedulerId: `schedulerId` this task was created by. (required) * taskGroupId: `taskGroupId` this task was created in. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified. """ ref = { 'exchange': 'task-exception', 'name': 'taskException', 'routingKey': [ { 'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind', }, { 'multipleWords': False, 'name': 'taskId', }, { 'multipleWords': False, 'name': 'runId', }, { 'multipleWords': False, 'name': 'workerGroup', }, { 'multipleWords': False, 'name': 'workerId', }, { 'multipleWords': False, 'name': 'provisionerId', }, { 'multipleWords': False, 'name': 'workerType', }, { 'multipleWords': False, 'name': 'schedulerId', }, { 'multipleWords': False, 'name': 'taskGroupId', }, { 'multipleWords': True, 'name': 'reserved', }, ], 'schema': 'v1/task-exception-message.json#', } return self._makeTopicExchange(ref, *args, **kwargs)
python
def taskException(self, *args, **kwargs): """ Task Exception Messages Whenever Taskcluster fails to run a message is posted to this exchange. This happens if the task isn't completed before its `deadlìne`, all retries failed (i.e. workers stopped responding), the task was canceled by another entity, or the task carried a malformed payload. The specific _reason_ is evident from that task status structure, refer to the `reasonResolved` property for the last run. This exchange outputs: ``v1/task-exception-message.json#``This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * taskId: `taskId` for the task this message concerns (required) * runId: `runId` of latest run for the task, `_` if no run is exists for the task. * workerGroup: `workerGroup` of latest run for the task, `_` if no run is exists for the task. * workerId: `workerId` of latest run for the task, `_` if no run is exists for the task. * provisionerId: `provisionerId` this task is targeted at. (required) * workerType: `workerType` this task must run on. (required) * schedulerId: `schedulerId` this task was created by. (required) * taskGroupId: `taskGroupId` this task was created in. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified. """ ref = { 'exchange': 'task-exception', 'name': 'taskException', 'routingKey': [ { 'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind', }, { 'multipleWords': False, 'name': 'taskId', }, { 'multipleWords': False, 'name': 'runId', }, { 'multipleWords': False, 'name': 'workerGroup', }, { 'multipleWords': False, 'name': 'workerId', }, { 'multipleWords': False, 'name': 'provisionerId', }, { 'multipleWords': False, 'name': 'workerType', }, { 'multipleWords': False, 'name': 'schedulerId', }, { 'multipleWords': False, 'name': 'taskGroupId', }, { 'multipleWords': True, 'name': 'reserved', }, ], 'schema': 'v1/task-exception-message.json#', } return self._makeTopicExchange(ref, *args, **kwargs)
['def', 'taskException', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'ref', '=', '{', "'exchange'", ':', "'task-exception'", ',', "'name'", ':', "'taskException'", ',', "'routingKey'", ':', '[', '{', "'constant'", ':', "'primary'", ',', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'routingKeyKind'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'taskId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'runId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'workerGroup'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'workerId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'provisionerId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'workerType'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'schedulerId'", ',', '}', ',', '{', "'multipleWords'", ':', 'False', ',', "'name'", ':', "'taskGroupId'", ',', '}', ',', '{', "'multipleWords'", ':', 'True', ',', "'name'", ':', "'reserved'", ',', '}', ',', ']', ',', "'schema'", ':', "'v1/task-exception-message.json#'", ',', '}', 'return', 'self', '.', '_makeTopicExchange', '(', 'ref', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Task Exception Messages Whenever Taskcluster fails to run a message is posted to this exchange. This happens if the task isn't completed before its `deadlìne`, all retries failed (i.e. workers stopped responding), the task was canceled by another entity, or the task carried a malformed payload. The specific _reason_ is evident from that task status structure, refer to the `reasonResolved` property for the last run. This exchange outputs: ``v1/task-exception-message.json#``This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * taskId: `taskId` for the task this message concerns (required) * runId: `runId` of latest run for the task, `_` if no run is exists for the task. * workerGroup: `workerGroup` of latest run for the task, `_` if no run is exists for the task. * workerId: `workerId` of latest run for the task, `_` if no run is exists for the task. * provisionerId: `provisionerId` this task is targeted at. (required) * workerType: `workerType` this task must run on. (required) * schedulerId: `schedulerId` this task was created by. (required) * taskGroupId: `taskGroupId` this task was created in. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
['Task', 'Exception', 'Messages']
train
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queueevents.py#L582-L665
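The routing-key entries listed in the record above correspond to a dot-separated AMQP topic key. A small illustrative sketch of turning that entry list into a binding pattern, using '*' for unconstrained single-word fields and '#' for the multi-word reserved tail; the helper and its defaults are assumptions for illustration, not taskcluster-client API.

def binding_pattern(provisioner_id='*', worker_type='*'):
    # Field order follows the routingKey list in the record above:
    # routingKeyKind, taskId, runId, workerGroup, workerId,
    # provisionerId, workerType, schedulerId, taskGroupId, reserved.
    parts = ['primary', '*', '*', '*', '*', provisioner_id, worker_type,
             '*', '*', '#']
    return '.'.join(parts)

print(binding_pattern(provisioner_id='aws-provisioner-v1'))
# primary.*.*.*.*.aws-provisioner-v1.*.*.*.#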
1,550
vallis/libstempo
libstempo/spharmORFbasis.py
rotated_Gamma_ml
def rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml): """ This function takes any gamma in the computational frame and rotates it to the cosmic frame. """ rotated_gamma = 0 for ii in range(2*l+1): rotated_gamma += Dlmk(l,m,ii-l,phi1,phi2,theta1,theta2).conjugate()*gamma_ml[ii] return rotated_gamma
python
def rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml): """ This function takes any gamma in the computational frame and rotates it to the cosmic frame. """ rotated_gamma = 0 for ii in range(2*l+1): rotated_gamma += Dlmk(l,m,ii-l,phi1,phi2,theta1,theta2).conjugate()*gamma_ml[ii] return rotated_gamma
['def', 'rotated_Gamma_ml', '(', 'm', ',', 'l', ',', 'phi1', ',', 'phi2', ',', 'theta1', ',', 'theta2', ',', 'gamma_ml', ')', ':', 'rotated_gamma', '=', '0', 'for', 'ii', 'in', 'range', '(', '2', '*', 'l', '+', '1', ')', ':', 'rotated_gamma', '+=', 'Dlmk', '(', 'l', ',', 'm', ',', 'ii', '-', 'l', ',', 'phi1', ',', 'phi2', ',', 'theta1', ',', 'theta2', ')', '.', 'conjugate', '(', ')', '*', 'gamma_ml', '[', 'ii', ']', 'return', 'rotated_gamma']
This function takes any gamma in the computational frame and rotates it to the cosmic frame.
['This', 'function', 'takes', 'any', 'gamma', 'in', 'the', 'computational', 'frame', 'and', 'rotates', 'it', 'to', 'the', 'cosmic', 'frame', '.']
train
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L266-L278
1,551
hatemile/hatemile-for-python
hatemile/implementation/css.py
AccessibleCSSImplementation._operation_speak_as_literal_punctuation
def _operation_speak_as_literal_punctuation( self, content, index, children ): """ The operation method of _speak_as method for literal-punctuation. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement) """ data_property_value = 'literal-punctuation' if index != 0: children.append(self._create_content_element( content[0:index], data_property_value )) children.append(self._create_aural_content_element( ( ' ' + self._get_description_of_symbol(content[index:(index + 1)]) + ' ' ), data_property_value) ) children.append(self._create_visual_content_element( content[index:(index + 1)], data_property_value )) return children
python
def _operation_speak_as_literal_punctuation( self, content, index, children ): """ The operation method of _speak_as method for literal-punctuation. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement) """ data_property_value = 'literal-punctuation' if index != 0: children.append(self._create_content_element( content[0:index], data_property_value )) children.append(self._create_aural_content_element( ( ' ' + self._get_description_of_symbol(content[index:(index + 1)]) + ' ' ), data_property_value) ) children.append(self._create_visual_content_element( content[index:(index + 1)], data_property_value )) return children
['def', '_operation_speak_as_literal_punctuation', '(', 'self', ',', 'content', ',', 'index', ',', 'children', ')', ':', 'data_property_value', '=', "'literal-punctuation'", 'if', 'index', '!=', '0', ':', 'children', '.', 'append', '(', 'self', '.', '_create_content_element', '(', 'content', '[', '0', ':', 'index', ']', ',', 'data_property_value', ')', ')', 'children', '.', 'append', '(', 'self', '.', '_create_aural_content_element', '(', '(', "' '", '+', 'self', '.', '_get_description_of_symbol', '(', 'content', '[', 'index', ':', '(', 'index', '+', '1', ')', ']', ')', '+', "' '", ')', ',', 'data_property_value', ')', ')', 'children', '.', 'append', '(', 'self', '.', '_create_visual_content_element', '(', 'content', '[', 'index', ':', '(', 'index', '+', '1', ')', ']', ',', 'data_property_value', ')', ')', 'return', 'children']
The operation method of _speak_as method for literal-punctuation. :param content: The text content of element. :type content: str :param index: The index of pattern in text content of element. :type index: int :param children: The children of element. :type children: list(hatemile.util.html.htmldomelement.HTMLDOMElement)
['The', 'operation', 'method', 'of', '_speak_as', 'method', 'for', 'literal', '-', 'punctuation', '.']
train
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/css.py#L229-L266
1,552
gebn/nibble
nibble/util.py
log_level_from_vebosity
def log_level_from_vebosity(verbosity): """ Get the `logging` module log level from a verbosity. :param verbosity: The number of times the `-v` option was specified. :return: The corresponding log level. """ if verbosity == 0: return logging.WARNING if verbosity == 1: return logging.INFO return logging.DEBUG
python
def log_level_from_vebosity(verbosity): """ Get the `logging` module log level from a verbosity. :param verbosity: The number of times the `-v` option was specified. :return: The corresponding log level. """ if verbosity == 0: return logging.WARNING if verbosity == 1: return logging.INFO return logging.DEBUG
['def', 'log_level_from_vebosity', '(', 'verbosity', ')', ':', 'if', 'verbosity', '==', '0', ':', 'return', 'logging', '.', 'WARNING', 'if', 'verbosity', '==', '1', ':', 'return', 'logging', '.', 'INFO', 'return', 'logging', '.', 'DEBUG']
Get the `logging` module log level from a verbosity. :param verbosity: The number of times the `-v` option was specified. :return: The corresponding log level.
['Get', 'the', 'logging', 'module', 'log', 'level', 'from', 'a', 'verbosity', '.']
train
https://github.com/gebn/nibble/blob/e82a2c43509ed38f3d039040591cc630fa676cb0/nibble/util.py#L35-L46
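A short usage sketch wiring the verbosity-to-log-level mapping above into argparse and logging.basicConfig; the counted -v flag is an assumed but typical pairing rather than part of the nibble record.

import argparse
import logging

parser = argparse.ArgumentParser()
# Each -v on the command line bumps the verbosity count by one.
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args(['-vv'])

level = {0: logging.WARNING, 1: logging.INFO}.get(args.verbose, logging.DEBUG)
logging.basicConfig(level=level)
logging.getLogger(__name__).debug('visible because verbosity >= 2')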
1,553
vijayvarma392/surfinBH
surfinBH/_fit_evaluators/fit_7dq2.py
Fit7dq2._load_fits
def _load_fits(self, h5file): """ Loads fits from h5file and returns a dictionary of fits. """ fits = {} for key in ['mf']: fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file) for key in ['chif', 'vf']: fits[key] = self._load_vector_fit(key, h5file) return fits
python
def _load_fits(self, h5file): """ Loads fits from h5file and returns a dictionary of fits. """ fits = {} for key in ['mf']: fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file) for key in ['chif', 'vf']: fits[key] = self._load_vector_fit(key, h5file) return fits
['def', '_load_fits', '(', 'self', ',', 'h5file', ')', ':', 'fits', '=', '{', '}', 'for', 'key', 'in', '[', "'mf'", ']', ':', 'fits', '[', 'key', ']', '=', 'self', '.', '_load_scalar_fit', '(', 'fit_key', '=', 'key', ',', 'h5file', '=', 'h5file', ')', 'for', 'key', 'in', '[', "'chif'", ',', "'vf'", ']', ':', 'fits', '[', 'key', ']', '=', 'self', '.', '_load_vector_fit', '(', 'key', ',', 'h5file', ')', 'return', 'fits']
Loads fits from h5file and returns a dictionary of fits.
['Loads', 'fits', 'from', 'h5file', 'and', 'returns', 'a', 'dictionary', 'of', 'fits', '.']
train
https://github.com/vijayvarma392/surfinBH/blob/9f2d25d00f894ee2ce9ffbb02f4e4a41fa7989eb/surfinBH/_fit_evaluators/fit_7dq2.py#L149-L156
1,554
cloudendpoints/endpoints-python
endpoints/openapi_generator.py
OpenApiGenerator.__api_openapi_descriptor
def __api_openapi_descriptor(self, services, hostname=None, x_google_api_name=False): """Builds an OpenAPI description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary that can be deserialized into JSON and stored as an API description document in OpenAPI format. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()), or a repeated method signature. """ merged_api_info = self.__get_merged_api_info(services) descriptor = self.get_descriptor_defaults(merged_api_info, hostname=hostname, x_google_api_name=x_google_api_name) description = merged_api_info.description if not description and len(services) == 1: description = services[0].__doc__ if description: descriptor['info']['description'] = description security_definitions = self.__security_definitions_descriptor( merged_api_info.issuers) method_map = {} method_collision_tracker = {} rest_collision_tracker = {} for service in services: remote_methods = service.all_remote_methods() for protorpc_meth_name in sorted(remote_methods.iterkeys()): protorpc_meth_info = remote_methods[protorpc_meth_name] method_info = getattr(protorpc_meth_info, 'method_info', None) # Skip methods that are not decorated with @method if method_info is None: continue method_id = method_info.method_id(service.api_info) is_api_key_required = method_info.is_api_key_required(service.api_info) path = '/{0}/{1}/{2}'.format(merged_api_info.name, merged_api_info.path_version, method_info.get_path(service.api_info)) verb = method_info.http_method.lower() if path not in method_map: method_map[path] = {} # If an API key is required and the security definitions don't already # have the apiKey issuer, add the appropriate notation now if is_api_key_required and _API_KEY not in security_definitions: security_definitions[_API_KEY] = { 'type': 'apiKey', 'name': _API_KEY_PARAM, 'in': 'query' } # Derive an OperationId from the method name data operation_id = self._construct_operation_id( service.__name__, protorpc_meth_name) method_map[path][verb] = self.__method_descriptor( service, method_info, operation_id, protorpc_meth_info, security_definitions) # Make sure the same method name isn't repeated. if method_id in method_collision_tracker: raise api_exceptions.ApiConfigurationError( 'Method %s used multiple times, in classes %s and %s' % (method_id, method_collision_tracker[method_id], service.__name__)) else: method_collision_tracker[method_id] = service.__name__ # Make sure the same HTTP method & path aren't repeated. 
rest_identifier = (method_info.http_method, method_info.get_path(service.api_info)) if rest_identifier in rest_collision_tracker: raise api_exceptions.ApiConfigurationError( '%s path "%s" used multiple times, in classes %s and %s' % (method_info.http_method, method_info.get_path(service.api_info), rest_collision_tracker[rest_identifier], service.__name__)) else: rest_collision_tracker[rest_identifier] = service.__name__ if method_map: descriptor['paths'] = method_map # Add request and/or response definitions, if any definitions = self.__definitions_descriptor() if definitions: descriptor['definitions'] = definitions descriptor['securityDefinitions'] = security_definitions # Add quota limit metric definitions, if any limit_definitions = self.__x_google_quota_definitions_descriptor( merged_api_info.limit_definitions) if limit_definitions: descriptor['x-google-management'] = limit_definitions return descriptor
python
def __api_openapi_descriptor(self, services, hostname=None, x_google_api_name=False): """Builds an OpenAPI description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary that can be deserialized into JSON and stored as an API description document in OpenAPI format. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()), or a repeated method signature. """ merged_api_info = self.__get_merged_api_info(services) descriptor = self.get_descriptor_defaults(merged_api_info, hostname=hostname, x_google_api_name=x_google_api_name) description = merged_api_info.description if not description and len(services) == 1: description = services[0].__doc__ if description: descriptor['info']['description'] = description security_definitions = self.__security_definitions_descriptor( merged_api_info.issuers) method_map = {} method_collision_tracker = {} rest_collision_tracker = {} for service in services: remote_methods = service.all_remote_methods() for protorpc_meth_name in sorted(remote_methods.iterkeys()): protorpc_meth_info = remote_methods[protorpc_meth_name] method_info = getattr(protorpc_meth_info, 'method_info', None) # Skip methods that are not decorated with @method if method_info is None: continue method_id = method_info.method_id(service.api_info) is_api_key_required = method_info.is_api_key_required(service.api_info) path = '/{0}/{1}/{2}'.format(merged_api_info.name, merged_api_info.path_version, method_info.get_path(service.api_info)) verb = method_info.http_method.lower() if path not in method_map: method_map[path] = {} # If an API key is required and the security definitions don't already # have the apiKey issuer, add the appropriate notation now if is_api_key_required and _API_KEY not in security_definitions: security_definitions[_API_KEY] = { 'type': 'apiKey', 'name': _API_KEY_PARAM, 'in': 'query' } # Derive an OperationId from the method name data operation_id = self._construct_operation_id( service.__name__, protorpc_meth_name) method_map[path][verb] = self.__method_descriptor( service, method_info, operation_id, protorpc_meth_info, security_definitions) # Make sure the same method name isn't repeated. if method_id in method_collision_tracker: raise api_exceptions.ApiConfigurationError( 'Method %s used multiple times, in classes %s and %s' % (method_id, method_collision_tracker[method_id], service.__name__)) else: method_collision_tracker[method_id] = service.__name__ # Make sure the same HTTP method & path aren't repeated. 
rest_identifier = (method_info.http_method, method_info.get_path(service.api_info)) if rest_identifier in rest_collision_tracker: raise api_exceptions.ApiConfigurationError( '%s path "%s" used multiple times, in classes %s and %s' % (method_info.http_method, method_info.get_path(service.api_info), rest_collision_tracker[rest_identifier], service.__name__)) else: rest_collision_tracker[rest_identifier] = service.__name__ if method_map: descriptor['paths'] = method_map # Add request and/or response definitions, if any definitions = self.__definitions_descriptor() if definitions: descriptor['definitions'] = definitions descriptor['securityDefinitions'] = security_definitions # Add quota limit metric definitions, if any limit_definitions = self.__x_google_quota_definitions_descriptor( merged_api_info.limit_definitions) if limit_definitions: descriptor['x-google-management'] = limit_definitions return descriptor
['def', '__api_openapi_descriptor', '(', 'self', ',', 'services', ',', 'hostname', '=', 'None', ',', 'x_google_api_name', '=', 'False', ')', ':', 'merged_api_info', '=', 'self', '.', '__get_merged_api_info', '(', 'services', ')', 'descriptor', '=', 'self', '.', 'get_descriptor_defaults', '(', 'merged_api_info', ',', 'hostname', '=', 'hostname', ',', 'x_google_api_name', '=', 'x_google_api_name', ')', 'description', '=', 'merged_api_info', '.', 'description', 'if', 'not', 'description', 'and', 'len', '(', 'services', ')', '==', '1', ':', 'description', '=', 'services', '[', '0', ']', '.', '__doc__', 'if', 'description', ':', 'descriptor', '[', "'info'", ']', '[', "'description'", ']', '=', 'description', 'security_definitions', '=', 'self', '.', '__security_definitions_descriptor', '(', 'merged_api_info', '.', 'issuers', ')', 'method_map', '=', '{', '}', 'method_collision_tracker', '=', '{', '}', 'rest_collision_tracker', '=', '{', '}', 'for', 'service', 'in', 'services', ':', 'remote_methods', '=', 'service', '.', 'all_remote_methods', '(', ')', 'for', 'protorpc_meth_name', 'in', 'sorted', '(', 'remote_methods', '.', 'iterkeys', '(', ')', ')', ':', 'protorpc_meth_info', '=', 'remote_methods', '[', 'protorpc_meth_name', ']', 'method_info', '=', 'getattr', '(', 'protorpc_meth_info', ',', "'method_info'", ',', 'None', ')', '# Skip methods that are not decorated with @method', 'if', 'method_info', 'is', 'None', ':', 'continue', 'method_id', '=', 'method_info', '.', 'method_id', '(', 'service', '.', 'api_info', ')', 'is_api_key_required', '=', 'method_info', '.', 'is_api_key_required', '(', 'service', '.', 'api_info', ')', 'path', '=', "'/{0}/{1}/{2}'", '.', 'format', '(', 'merged_api_info', '.', 'name', ',', 'merged_api_info', '.', 'path_version', ',', 'method_info', '.', 'get_path', '(', 'service', '.', 'api_info', ')', ')', 'verb', '=', 'method_info', '.', 'http_method', '.', 'lower', '(', ')', 'if', 'path', 'not', 'in', 'method_map', ':', 'method_map', '[', 'path', ']', '=', '{', '}', "# If an API key is required and the security definitions don't already", '# have the apiKey issuer, add the appropriate notation now', 'if', 'is_api_key_required', 'and', '_API_KEY', 'not', 'in', 'security_definitions', ':', 'security_definitions', '[', '_API_KEY', ']', '=', '{', "'type'", ':', "'apiKey'", ',', "'name'", ':', '_API_KEY_PARAM', ',', "'in'", ':', "'query'", '}', '# Derive an OperationId from the method name data', 'operation_id', '=', 'self', '.', '_construct_operation_id', '(', 'service', '.', '__name__', ',', 'protorpc_meth_name', ')', 'method_map', '[', 'path', ']', '[', 'verb', ']', '=', 'self', '.', '__method_descriptor', '(', 'service', ',', 'method_info', ',', 'operation_id', ',', 'protorpc_meth_info', ',', 'security_definitions', ')', "# Make sure the same method name isn't repeated.", 'if', 'method_id', 'in', 'method_collision_tracker', ':', 'raise', 'api_exceptions', '.', 'ApiConfigurationError', '(', "'Method %s used multiple times, in classes %s and %s'", '%', '(', 'method_id', ',', 'method_collision_tracker', '[', 'method_id', ']', ',', 'service', '.', '__name__', ')', ')', 'else', ':', 'method_collision_tracker', '[', 'method_id', ']', '=', 'service', '.', '__name__', "# Make sure the same HTTP method & path aren't repeated.", 'rest_identifier', '=', '(', 'method_info', '.', 'http_method', ',', 'method_info', '.', 'get_path', '(', 'service', '.', 'api_info', ')', ')', 'if', 'rest_identifier', 'in', 'rest_collision_tracker', ':', 'raise', 'api_exceptions', '.', 
'ApiConfigurationError', '(', '\'%s path "%s" used multiple times, in classes %s and %s\'', '%', '(', 'method_info', '.', 'http_method', ',', 'method_info', '.', 'get_path', '(', 'service', '.', 'api_info', ')', ',', 'rest_collision_tracker', '[', 'rest_identifier', ']', ',', 'service', '.', '__name__', ')', ')', 'else', ':', 'rest_collision_tracker', '[', 'rest_identifier', ']', '=', 'service', '.', '__name__', 'if', 'method_map', ':', 'descriptor', '[', "'paths'", ']', '=', 'method_map', '# Add request and/or response definitions, if any', 'definitions', '=', 'self', '.', '__definitions_descriptor', '(', ')', 'if', 'definitions', ':', 'descriptor', '[', "'definitions'", ']', '=', 'definitions', 'descriptor', '[', "'securityDefinitions'", ']', '=', 'security_definitions', '# Add quota limit metric definitions, if any', 'limit_definitions', '=', 'self', '.', '__x_google_quota_definitions_descriptor', '(', 'merged_api_info', '.', 'limit_definitions', ')', 'if', 'limit_definitions', ':', 'descriptor', '[', "'x-google-management'", ']', '=', 'limit_definitions', 'return', 'descriptor']
Builds an OpenAPI description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary that can be deserialized into JSON and stored as an API description document in OpenAPI format. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()), or a repeated method signature.
['Builds', 'an', 'OpenAPI', 'description', 'of', 'an', 'API', '.']
train
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/openapi_generator.py#L883-L993
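The OpenAPI generator record above keeps two collision trackers while it folds service methods into a nested paths map. A framework-free sketch of that pattern follows; the route data and names are invented for illustration.

method_map = {}
seen_rest = {}

routes = [
    ('GET', '/v1/items', 'ItemsService.list'),
    ('POST', '/v1/items', 'ItemsService.create'),
    ('GET', '/v1/items', 'OtherService.list'),  # deliberate duplicate
]

for verb, path, owner in routes:
    key = (verb, path)
    if key in seen_rest:
        # The real generator raises ApiConfigurationError at this point.
        print('%s %s used by both %s and %s' % (verb, path, seen_rest[key], owner))
        continue
    seen_rest[key] = owner
    method_map.setdefault(path, {})[verb.lower()] = {'operationId': owner}

print(method_map)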
1,555
kennethreitz/grequests
grequests.py
imap
def imap(requests, stream=False, size=2, exception_handler=None): """Concurrently converts a generator object of Requests to a generator of Responses. :param requests: a generator of Request objects. :param stream: If True, the content will not be downloaded immediately. :param size: Specifies the number of requests to make at a time. default is 2 :param exception_handler: Callback function, called when exception occured. Params: Request, Exception """ pool = Pool(size) def send(r): return r.send(stream=stream) for request in pool.imap_unordered(send, requests): if request.response is not None: yield request.response elif exception_handler: ex_result = exception_handler(request, request.exception) if ex_result is not None: yield ex_result pool.join()
python
def imap(requests, stream=False, size=2, exception_handler=None): """Concurrently converts a generator object of Requests to a generator of Responses. :param requests: a generator of Request objects. :param stream: If True, the content will not be downloaded immediately. :param size: Specifies the number of requests to make at a time. default is 2 :param exception_handler: Callback function, called when exception occured. Params: Request, Exception """ pool = Pool(size) def send(r): return r.send(stream=stream) for request in pool.imap_unordered(send, requests): if request.response is not None: yield request.response elif exception_handler: ex_result = exception_handler(request, request.exception) if ex_result is not None: yield ex_result pool.join()
['def', 'imap', '(', 'requests', ',', 'stream', '=', 'False', ',', 'size', '=', '2', ',', 'exception_handler', '=', 'None', ')', ':', 'pool', '=', 'Pool', '(', 'size', ')', 'def', 'send', '(', 'r', ')', ':', 'return', 'r', '.', 'send', '(', 'stream', '=', 'stream', ')', 'for', 'request', 'in', 'pool', '.', 'imap_unordered', '(', 'send', ',', 'requests', ')', ':', 'if', 'request', '.', 'response', 'is', 'not', 'None', ':', 'yield', 'request', '.', 'response', 'elif', 'exception_handler', ':', 'ex_result', '=', 'exception_handler', '(', 'request', ',', 'request', '.', 'exception', ')', 'if', 'ex_result', 'is', 'not', 'None', ':', 'yield', 'ex_result', 'pool', '.', 'join', '(', ')']
Concurrently converts a generator object of Requests to a generator of Responses. :param requests: a generator of Request objects. :param stream: If True, the content will not be downloaded immediately. :param size: Specifies the number of requests to make at a time. default is 2 :param exception_handler: Callback function, called when exception occured. Params: Request, Exception
['Concurrently', 'converts', 'a', 'generator', 'object', 'of', 'Requests', 'to', 'a', 'generator', 'of', 'Responses', '.']
train
https://github.com/kennethreitz/grequests/blob/ba25872510e06af213b0c209d204cdc913e3a429/grequests.py#L132-L155
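A hedged usage sketch for the imap generator above; it assumes grequests is installed and that the placeholder URLs are reachable, so treat it as an illustration rather than a test.

import grequests

urls = ['https://httpbin.org/get', 'https://httpbin.org/status/404']
pending = (grequests.get(u) for u in urls)

def on_error(request, exception):
    # Called for requests that raised instead of producing a response.
    print('failed:', request.url, exception)

for response in grequests.imap(pending, size=2, exception_handler=on_error):
    print(response.status_code, response.url)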
1,556
99designs/colorific
colorific/palette.py
color_stream_st
def color_stream_st(istream=sys.stdin, save_palette=False, **kwargs): """ Read filenames from the input stream and detect their palette. """ for line in istream: filename = line.strip() try: palette = extract_colors(filename, **kwargs) except Exception as e: print(filename, e, file=sys.stderr) continue print_colors(filename, palette) if save_palette: save_palette_as_image(filename, palette)
python
def color_stream_st(istream=sys.stdin, save_palette=False, **kwargs): """ Read filenames from the input stream and detect their palette. """ for line in istream: filename = line.strip() try: palette = extract_colors(filename, **kwargs) except Exception as e: print(filename, e, file=sys.stderr) continue print_colors(filename, palette) if save_palette: save_palette_as_image(filename, palette)
['def', 'color_stream_st', '(', 'istream', '=', 'sys', '.', 'stdin', ',', 'save_palette', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'for', 'line', 'in', 'istream', ':', 'filename', '=', 'line', '.', 'strip', '(', ')', 'try', ':', 'palette', '=', 'extract_colors', '(', 'filename', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', 'as', 'e', ':', 'print', '(', 'filename', ',', 'e', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'continue', 'print_colors', '(', 'filename', ',', 'palette', ')', 'if', 'save_palette', ':', 'save_palette_as_image', '(', 'filename', ',', 'palette', ')']
Read filenames from the input stream and detect their palette.
['Read', 'filenames', 'from', 'the', 'input', 'stream', 'and', 'detect', 'their', 'palette', '.']
train
https://github.com/99designs/colorific/blob/f83e59f61295500f5527dee5894207f2f033cf35/colorific/palette.py#L37-L52
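The colorific record above is an instance of a common CLI pattern: read one filename per line from stdin, process it, and keep going on per-file failures. A generic sketch of that loop with the palette extraction swapped for a placeholder callable:

import sys

def process_stream(handle_file, istream=sys.stdin):
    for line in istream:
        filename = line.strip()
        if not filename:
            continue
        try:
            result = handle_file(filename)
        except Exception as exc:
            # Report the failure and move on, mirroring the record above.
            print(filename, exc, file=sys.stderr)
            continue
        print(filename, result)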
1,557
bram85/topydo
topydo/lib/Importance.py
importance
def importance(p_todo, p_ignore_weekend=config().ignore_weekends()): """ Calculates the importance of the given task. Returns an importance of zero when the task has been completed. If p_ignore_weekend is True, the importance value of the due date will be calculated as if Friday is immediately followed by Monday. This in case of a todo list at the office and you don't work during the weekends (you don't, right?) """ result = 2 priority = p_todo.priority() result += IMPORTANCE_VALUE[priority] if priority in IMPORTANCE_VALUE else 0 if p_todo.has_tag(config().tag_due()): days_left = p_todo.days_till_due() if days_left >= 7 and days_left < 14: result += 1 elif days_left >= 2 and days_left < 7: result += 2 elif days_left >= 1 and days_left < 2: result += 3 elif days_left >= 0 and days_left < 1: result += 5 elif days_left < 0: result += 6 if p_ignore_weekend and is_due_next_monday(p_todo): result += 1 if p_todo.has_tag(config().tag_star()): result += 1 return result if not p_todo.is_completed() else 0
python
def importance(p_todo, p_ignore_weekend=config().ignore_weekends()): """ Calculates the importance of the given task. Returns an importance of zero when the task has been completed. If p_ignore_weekend is True, the importance value of the due date will be calculated as if Friday is immediately followed by Monday. This in case of a todo list at the office and you don't work during the weekends (you don't, right?) """ result = 2 priority = p_todo.priority() result += IMPORTANCE_VALUE[priority] if priority in IMPORTANCE_VALUE else 0 if p_todo.has_tag(config().tag_due()): days_left = p_todo.days_till_due() if days_left >= 7 and days_left < 14: result += 1 elif days_left >= 2 and days_left < 7: result += 2 elif days_left >= 1 and days_left < 2: result += 3 elif days_left >= 0 and days_left < 1: result += 5 elif days_left < 0: result += 6 if p_ignore_weekend and is_due_next_monday(p_todo): result += 1 if p_todo.has_tag(config().tag_star()): result += 1 return result if not p_todo.is_completed() else 0
['def', 'importance', '(', 'p_todo', ',', 'p_ignore_weekend', '=', 'config', '(', ')', '.', 'ignore_weekends', '(', ')', ')', ':', 'result', '=', '2', 'priority', '=', 'p_todo', '.', 'priority', '(', ')', 'result', '+=', 'IMPORTANCE_VALUE', '[', 'priority', ']', 'if', 'priority', 'in', 'IMPORTANCE_VALUE', 'else', '0', 'if', 'p_todo', '.', 'has_tag', '(', 'config', '(', ')', '.', 'tag_due', '(', ')', ')', ':', 'days_left', '=', 'p_todo', '.', 'days_till_due', '(', ')', 'if', 'days_left', '>=', '7', 'and', 'days_left', '<', '14', ':', 'result', '+=', '1', 'elif', 'days_left', '>=', '2', 'and', 'days_left', '<', '7', ':', 'result', '+=', '2', 'elif', 'days_left', '>=', '1', 'and', 'days_left', '<', '2', ':', 'result', '+=', '3', 'elif', 'days_left', '>=', '0', 'and', 'days_left', '<', '1', ':', 'result', '+=', '5', 'elif', 'days_left', '<', '0', ':', 'result', '+=', '6', 'if', 'p_ignore_weekend', 'and', 'is_due_next_monday', '(', 'p_todo', ')', ':', 'result', '+=', '1', 'if', 'p_todo', '.', 'has_tag', '(', 'config', '(', ')', '.', 'tag_star', '(', ')', ')', ':', 'result', '+=', '1', 'return', 'result', 'if', 'not', 'p_todo', '.', 'is_completed', '(', ')', 'else', '0']
Calculates the importance of the given task. Returns an importance of zero when the task has been completed. If p_ignore_weekend is True, the importance value of the due date will be calculated as if Friday is immediately followed by Monday. This in case of a todo list at the office and you don't work during the weekends (you don't, right?)
['Calculates', 'the', 'importance', 'of', 'the', 'given', 'task', '.', 'Returns', 'an', 'importance', 'of', 'zero', 'when', 'the', 'task', 'has', 'been', 'completed', '.']
train
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/Importance.py#L44-L79
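A self-contained sketch of the due-date bucketing inside importance(), decoupled from topydo's config and Todo objects; the bracket boundaries are copied from the record, while the plain-integer interface is an assumption.

def urgency_bonus(days_left):
    # Closer deadlines earn a larger bonus; overdue tasks earn the largest.
    if 7 <= days_left < 14:
        return 1
    if 2 <= days_left < 7:
        return 2
    if 1 <= days_left < 2:
        return 3
    if 0 <= days_left < 1:
        return 5
    if days_left < 0:
        return 6
    return 0

assert urgency_bonus(10) == 1 and urgency_bonus(0) == 5 and urgency_bonus(-1) == 6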
1,558
PyHDI/Pyverilog
pyverilog/vparser/parser.py
VerilogParser.p_single_statement_systemcall
def p_single_statement_systemcall(self, p): 'single_statement : systemcall SEMICOLON' p[0] = SingleStatement(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
python
def p_single_statement_systemcall(self, p): 'single_statement : systemcall SEMICOLON' p[0] = SingleStatement(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
['def', 'p_single_statement_systemcall', '(', 'self', ',', 'p', ')', ':', 'p', '[', '0', ']', '=', 'SingleStatement', '(', 'p', '[', '1', ']', ',', 'lineno', '=', 'p', '.', 'lineno', '(', '1', ')', ')', 'p', '.', 'set_lineno', '(', '0', ',', 'p', '.', 'lineno', '(', '1', ')', ')']
single_statement : systemcall SEMICOLON
['single_statement', ':', 'systemcall', 'SEMICOLON']
train
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L2177-L2180
1,559
espressif/esptool
esptool.py
ESPLoader.flash_spi_attach
def flash_spi_attach(self, hspi_arg): """Send SPI attach command to enable the SPI flash pins ESP8266 ROM does this when you send flash_begin, ESP32 ROM has it as a SPI command. """ # last 3 bytes in ESP_SPI_ATTACH argument are reserved values arg = struct.pack('<I', hspi_arg) if not self.IS_STUB: # ESP32 ROM loader takes additional 'is legacy' arg, which is not # currently supported in the stub loader or esptool.py (as it's not usually needed.) is_legacy = 0 arg += struct.pack('BBBB', is_legacy, 0, 0, 0) self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg)
python
def flash_spi_attach(self, hspi_arg): """Send SPI attach command to enable the SPI flash pins ESP8266 ROM does this when you send flash_begin, ESP32 ROM has it as a SPI command. """ # last 3 bytes in ESP_SPI_ATTACH argument are reserved values arg = struct.pack('<I', hspi_arg) if not self.IS_STUB: # ESP32 ROM loader takes additional 'is legacy' arg, which is not # currently supported in the stub loader or esptool.py (as it's not usually needed.) is_legacy = 0 arg += struct.pack('BBBB', is_legacy, 0, 0, 0) self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg)
['def', 'flash_spi_attach', '(', 'self', ',', 'hspi_arg', ')', ':', '# last 3 bytes in ESP_SPI_ATTACH argument are reserved values', 'arg', '=', 'struct', '.', 'pack', '(', "'<I'", ',', 'hspi_arg', ')', 'if', 'not', 'self', '.', 'IS_STUB', ':', "# ESP32 ROM loader takes additional 'is legacy' arg, which is not", "# currently supported in the stub loader or esptool.py (as it's not usually needed.)", 'is_legacy', '=', '0', 'arg', '+=', 'struct', '.', 'pack', '(', "'BBBB'", ',', 'is_legacy', ',', '0', ',', '0', ',', '0', ')', 'self', '.', 'check_command', '(', '"configure SPI flash pins"', ',', 'ESP32ROM', '.', 'ESP_SPI_ATTACH', ',', 'arg', ')']
Send SPI attach command to enable the SPI flash pins ESP8266 ROM does this when you send flash_begin, ESP32 ROM has it as a SPI command.
['Send', 'SPI', 'attach', 'command', 'to', 'enable', 'the', 'SPI', 'flash', 'pins']
train
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/esptool.py#L759-L772
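A standalone sketch of the little-endian packing done by flash_spi_attach above; the hspi_arg value is arbitrary and only the byte layout is being illustrated.

import struct

hspi_arg = 1  # arbitrary example value
# Stub-loader case: a single 32-bit little-endian argument.
arg = struct.pack('<I', hspi_arg)
# ROM-loader case: append an 'is legacy' byte plus three reserved zero bytes.
arg_rom = arg + struct.pack('BBBB', 0, 0, 0, 0)
print(arg.hex(), arg_rom.hex())  # 01000000 0100000000000000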
1,560
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/cli/utils.py
maybe_print_as_json
def maybe_print_as_json(opts, data, page_info=None): """Maybe print data as JSON.""" if opts.output not in ("json", "pretty_json"): return False root = {"data": data} if page_info is not None and page_info.is_valid: meta = root["meta"] = {} meta["pagination"] = page_info.as_dict(num_results=len(data)) if opts.output == "pretty_json": dump = json.dumps(root, indent=4, sort_keys=True) else: dump = json.dumps(root, sort_keys=True) click.echo(dump) return True
python
def maybe_print_as_json(opts, data, page_info=None): """Maybe print data as JSON.""" if opts.output not in ("json", "pretty_json"): return False root = {"data": data} if page_info is not None and page_info.is_valid: meta = root["meta"] = {} meta["pagination"] = page_info.as_dict(num_results=len(data)) if opts.output == "pretty_json": dump = json.dumps(root, indent=4, sort_keys=True) else: dump = json.dumps(root, sort_keys=True) click.echo(dump) return True
['def', 'maybe_print_as_json', '(', 'opts', ',', 'data', ',', 'page_info', '=', 'None', ')', ':', 'if', 'opts', '.', 'output', 'not', 'in', '(', '"json"', ',', '"pretty_json"', ')', ':', 'return', 'False', 'root', '=', '{', '"data"', ':', 'data', '}', 'if', 'page_info', 'is', 'not', 'None', 'and', 'page_info', '.', 'is_valid', ':', 'meta', '=', 'root', '[', '"meta"', ']', '=', '{', '}', 'meta', '[', '"pagination"', ']', '=', 'page_info', '.', 'as_dict', '(', 'num_results', '=', 'len', '(', 'data', ')', ')', 'if', 'opts', '.', 'output', '==', '"pretty_json"', ':', 'dump', '=', 'json', '.', 'dumps', '(', 'root', ',', 'indent', '=', '4', ',', 'sort_keys', '=', 'True', ')', 'else', ':', 'dump', '=', 'json', '.', 'dumps', '(', 'root', ',', 'sort_keys', '=', 'True', ')', 'click', '.', 'echo', '(', 'dump', ')', 'return', 'True']
Maybe print data as JSON.
['Maybe', 'print', 'data', 'as', 'JSON', '.']
train
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/utils.py#L107-L124
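A minimal sketch of the pretty-versus-compact JSON switch used in maybe_print_as_json above, without the click and pagination plumbing; the output flag values mirror the record.

import json

def dump_json(data, output='json'):
    root = {'data': data}
    if output == 'pretty_json':
        return json.dumps(root, indent=4, sort_keys=True)
    return json.dumps(root, sort_keys=True)

print(dump_json([{'name': 'pkg', 'version': '1.0'}], output='pretty_json'))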
1,561
ahmontero/dop
dop/client.py
Client.destroy_domain_record
def destroy_domain_record(self, domain_id, record_id): """ This method deletes the specified domain record. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to destroy a record. record_id: Integer, specifies the record_id to destroy. """ json = self.request('/domains/%s/records/%s/destroy' % (domain_id, record_id), method='GET') status = json.get('status') return status
python
def destroy_domain_record(self, domain_id, record_id): """ This method deletes the specified domain record. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to destroy a record. record_id: Integer, specifies the record_id to destroy. """ json = self.request('/domains/%s/records/%s/destroy' % (domain_id, record_id), method='GET') status = json.get('status') return status
['def', 'destroy_domain_record', '(', 'self', ',', 'domain_id', ',', 'record_id', ')', ':', 'json', '=', 'self', '.', 'request', '(', "'/domains/%s/records/%s/destroy'", '%', '(', 'domain_id', ',', 'record_id', ')', ',', 'method', '=', "'GET'", ')', 'status', '=', 'json', '.', 'get', '(', "'status'", ')', 'return', 'status']
This method deletes the specified domain record. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to destroy a record. record_id: Integer, specifies the record_id to destroy.
['This', 'method', 'deletes', 'the', 'specified', 'domain', 'record', '.']
train
https://github.com/ahmontero/dop/blob/40354ac6feefe92a7555fe2d1834138c9a03e518/dop/client.py#L877-L893
1,562
saxix/django-concurrency
src/concurrency/utils.py
flatten
def flatten(iterable): """ flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). :param sequence: any object that implements iterable protocol (see: :ref:`typeiter`) :return: list Examples: >>> from adminactions.utils import flatten >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]""" result = list() for el in iterable: if hasattr(el, "__iter__") and not isinstance(el, str): result.extend(flatten(el)) else: result.append(el) return list(result)
python
def flatten(iterable): """ flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). :param sequence: any object that implements iterable protocol (see: :ref:`typeiter`) :return: list Examples: >>> from adminactions.utils import flatten >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]""" result = list() for el in iterable: if hasattr(el, "__iter__") and not isinstance(el, str): result.extend(flatten(el)) else: result.append(el) return list(result)
['def', 'flatten', '(', 'iterable', ')', ':', 'result', '=', 'list', '(', ')', 'for', 'el', 'in', 'iterable', ':', 'if', 'hasattr', '(', 'el', ',', '"__iter__"', ')', 'and', 'not', 'isinstance', '(', 'el', ',', 'str', ')', ':', 'result', '.', 'extend', '(', 'flatten', '(', 'el', ')', ')', 'else', ':', 'result', '.', 'append', '(', 'el', ')', 'return', 'list', '(', 'result', ')']
flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). :param sequence: any object that implements iterable protocol (see: :ref:`typeiter`) :return: list Examples: >>> from adminactions.utils import flatten >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
['flatten', '(', 'sequence', ')', '-', '>', 'list']
train
https://github.com/saxix/django-concurrency/blob/9a289dc007b1cdf609b7dfb77a6d2868abc8097f/src/concurrency/utils.py#L188-L214
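A quick usage check for the flatten() record above; it assumes the function is importable from concurrency.utils (or that the definition shown in the record is in scope), and it highlights that strings survive intact because of the isinstance(el, str) guard.

from concurrency.utils import flatten  # assumes django-concurrency is installed

assert flatten([[[1, 2, 3], (42, None)], [4, 5], [6], 7, (8, 9, 10)]) == \
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
# Strings are not exploded into single characters:
assert flatten([['ab', 'cd'], 'ef']) == ['ab', 'cd', 'ef']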
1,563
ubernostrum/django-flashpolicies
flashpolicies/policies.py
Policy.allow_headers
def allow_headers(self, domain, headers, secure=True): """ Allows ``domain`` to push data via the HTTP headers named in ``headers``. As with ``allow_domain``, ``domain`` may be either a full domain name or a wildcard. Again, use of wildcards is discouraged for security reasons. The value for ``headers`` should be a list of header names. To disable Flash's requirement of security matching (e.g., retrieving a policy via HTTPS will require that SWFs also be retrieved via HTTPS), pass ``secure=False``. Due to security concerns, it is strongly recommended that you not disable this. """ if self.site_control == SITE_CONTROL_NONE: raise TypeError( METAPOLICY_ERROR.format("allow headers from a domain") ) self.header_domains[domain] = {'headers': headers, 'secure': secure}
python
def allow_headers(self, domain, headers, secure=True): """ Allows ``domain`` to push data via the HTTP headers named in ``headers``. As with ``allow_domain``, ``domain`` may be either a full domain name or a wildcard. Again, use of wildcards is discouraged for security reasons. The value for ``headers`` should be a list of header names. To disable Flash's requirement of security matching (e.g., retrieving a policy via HTTPS will require that SWFs also be retrieved via HTTPS), pass ``secure=False``. Due to security concerns, it is strongly recommended that you not disable this. """ if self.site_control == SITE_CONTROL_NONE: raise TypeError( METAPOLICY_ERROR.format("allow headers from a domain") ) self.header_domains[domain] = {'headers': headers, 'secure': secure}
['def', 'allow_headers', '(', 'self', ',', 'domain', ',', 'headers', ',', 'secure', '=', 'True', ')', ':', 'if', 'self', '.', 'site_control', '==', 'SITE_CONTROL_NONE', ':', 'raise', 'TypeError', '(', 'METAPOLICY_ERROR', '.', 'format', '(', '"allow headers from a domain"', ')', ')', 'self', '.', 'header_domains', '[', 'domain', ']', '=', '{', "'headers'", ':', 'headers', ',', "'secure'", ':', 'secure', '}']
Allows ``domain`` to push data via the HTTP headers named in ``headers``. As with ``allow_domain``, ``domain`` may be either a full domain name or a wildcard. Again, use of wildcards is discouraged for security reasons. The value for ``headers`` should be a list of header names. To disable Flash's requirement of security matching (e.g., retrieving a policy via HTTPS will require that SWFs also be retrieved via HTTPS), pass ``secure=False``. Due to security concerns, it is strongly recommended that you not disable this.
['Allows', 'domain', 'to', 'push', 'data', 'via', 'the', 'HTTP', 'headers', 'named', 'in', 'headers', '.']
train
https://github.com/ubernostrum/django-flashpolicies/blob/fb04693504186dde859cce97bad6e83d2b380dc6/flashpolicies/policies.py#L140-L163
1,564
mikedh/trimesh
trimesh/path/polygons.py
edges_to_polygons
def edges_to_polygons(edges, vertices): """ Given an edge list of indices and associated vertices representing lines, generate a list of polygons. Parameters ----------- edges : (n, 2) int Indexes of vertices which represent lines vertices : (m, 2) float Vertices in 2D space Returns ---------- polygons : (p,) shapely.geometry.Polygon Polygon objects with interiors """ # create closed polygon objects polygons = [] # loop through a sequence of ordered traversals for dfs in graph.traversals(edges, mode='dfs'): try: # try to recover polygons before they are more complicated polygons.append(repair_invalid(Polygon(vertices[dfs]))) except ValueError: continue # if there is only one polygon, just return it if len(polygons) == 1: return polygons # find which polygons contain which other polygons roots, tree = enclosure_tree(polygons) # generate list of polygons with proper interiors complete = [] for root in roots: interior = list(tree[root].keys()) shell = polygons[root].exterior.coords holes = [polygons[i].exterior.coords for i in interior] complete.append(Polygon(shell=shell, holes=holes)) return complete
python
def edges_to_polygons(edges, vertices): """ Given an edge list of indices and associated vertices representing lines, generate a list of polygons. Parameters ----------- edges : (n, 2) int Indexes of vertices which represent lines vertices : (m, 2) float Vertices in 2D space Returns ---------- polygons : (p,) shapely.geometry.Polygon Polygon objects with interiors """ # create closed polygon objects polygons = [] # loop through a sequence of ordered traversals for dfs in graph.traversals(edges, mode='dfs'): try: # try to recover polygons before they are more complicated polygons.append(repair_invalid(Polygon(vertices[dfs]))) except ValueError: continue # if there is only one polygon, just return it if len(polygons) == 1: return polygons # find which polygons contain which other polygons roots, tree = enclosure_tree(polygons) # generate list of polygons with proper interiors complete = [] for root in roots: interior = list(tree[root].keys()) shell = polygons[root].exterior.coords holes = [polygons[i].exterior.coords for i in interior] complete.append(Polygon(shell=shell, holes=holes)) return complete
['def', 'edges_to_polygons', '(', 'edges', ',', 'vertices', ')', ':', '# create closed polygon objects', 'polygons', '=', '[', ']', '# loop through a sequence of ordered traversals', 'for', 'dfs', 'in', 'graph', '.', 'traversals', '(', 'edges', ',', 'mode', '=', "'dfs'", ')', ':', 'try', ':', '# try to recover polygons before they are more complicated', 'polygons', '.', 'append', '(', 'repair_invalid', '(', 'Polygon', '(', 'vertices', '[', 'dfs', ']', ')', ')', ')', 'except', 'ValueError', ':', 'continue', '# if there is only one polygon, just return it', 'if', 'len', '(', 'polygons', ')', '==', '1', ':', 'return', 'polygons', '# find which polygons contain which other polygons', 'roots', ',', 'tree', '=', 'enclosure_tree', '(', 'polygons', ')', '# generate list of polygons with proper interiors', 'complete', '=', '[', ']', 'for', 'root', 'in', 'roots', ':', 'interior', '=', 'list', '(', 'tree', '[', 'root', ']', '.', 'keys', '(', ')', ')', 'shell', '=', 'polygons', '[', 'root', ']', '.', 'exterior', '.', 'coords', 'holes', '=', '[', 'polygons', '[', 'i', ']', '.', 'exterior', '.', 'coords', 'for', 'i', 'in', 'interior', ']', 'complete', '.', 'append', '(', 'Polygon', '(', 'shell', '=', 'shell', ',', 'holes', '=', 'holes', ')', ')', 'return', 'complete']
Given an edge list of indices and associated vertices representing lines, generate a list of polygons. Parameters ----------- edges : (n, 2) int Indexes of vertices which represent lines vertices : (m, 2) float Vertices in 2D space Returns ---------- polygons : (p,) shapely.geometry.Polygon Polygon objects with interiors
['Given', 'an', 'edge', 'list', 'of', 'indices', 'and', 'associated', 'vertices', 'representing', 'lines', 'generate', 'a', 'list', 'of', 'polygons', '.']
train
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/polygons.py#L99-L142
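The trimesh record above finishes by assembling `shapely` Polygons from an exterior shell plus interior holes. A tiny, self-contained illustration of just that shell/holes construction follows; the coordinates are invented for the example.

```python
from shapely.geometry import Polygon

# Outer boundary (shell) with one unit-square hole cut out of it.
shell = [(0, 0), (4, 0), (4, 4), (0, 4)]
holes = [[(1, 1), (2, 1), (2, 2), (1, 2)]]

poly = Polygon(shell=shell, holes=holes)
print(poly.area)            # 16 - 1 = 15.0
print(len(poly.interiors))  # 1 interior ring
```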
1,565
saltstack/salt
salt/cloud/clouds/joyent.py
reformat_node
def reformat_node(item=None, full=False): ''' Reformat the returned data from joyent, determine public/private IPs and strip out fields if necessary to provide either full or brief content. :param item: node dictionary :param full: full or brief output :return: dict ''' desired_keys = [ 'id', 'name', 'state', 'public_ips', 'private_ips', 'size', 'image', 'location' ] item['private_ips'] = [] item['public_ips'] = [] if 'ips' in item: for ip in item['ips']: if salt.utils.cloud.is_public_ip(ip): item['public_ips'].append(ip) else: item['private_ips'].append(ip) # add any undefined desired keys for key in desired_keys: if key not in item: item[key] = None # remove all the extra key value pairs to provide a brief listing to_del = [] if not full: for key in six.iterkeys(item): # iterate over a copy of the keys if key not in desired_keys: to_del.append(key) for key in to_del: del item[key] if 'state' in item: item['state'] = joyent_node_state(item['state']) return item
python
def reformat_node(item=None, full=False): ''' Reformat the returned data from joyent, determine public/private IPs and strip out fields if necessary to provide either full or brief content. :param item: node dictionary :param full: full or brief output :return: dict ''' desired_keys = [ 'id', 'name', 'state', 'public_ips', 'private_ips', 'size', 'image', 'location' ] item['private_ips'] = [] item['public_ips'] = [] if 'ips' in item: for ip in item['ips']: if salt.utils.cloud.is_public_ip(ip): item['public_ips'].append(ip) else: item['private_ips'].append(ip) # add any undefined desired keys for key in desired_keys: if key not in item: item[key] = None # remove all the extra key value pairs to provide a brief listing to_del = [] if not full: for key in six.iterkeys(item): # iterate over a copy of the keys if key not in desired_keys: to_del.append(key) for key in to_del: del item[key] if 'state' in item: item['state'] = joyent_node_state(item['state']) return item
['def', 'reformat_node', '(', 'item', '=', 'None', ',', 'full', '=', 'False', ')', ':', 'desired_keys', '=', '[', "'id'", ',', "'name'", ',', "'state'", ',', "'public_ips'", ',', "'private_ips'", ',', "'size'", ',', "'image'", ',', "'location'", ']', 'item', '[', "'private_ips'", ']', '=', '[', ']', 'item', '[', "'public_ips'", ']', '=', '[', ']', 'if', "'ips'", 'in', 'item', ':', 'for', 'ip', 'in', 'item', '[', "'ips'", ']', ':', 'if', 'salt', '.', 'utils', '.', 'cloud', '.', 'is_public_ip', '(', 'ip', ')', ':', 'item', '[', "'public_ips'", ']', '.', 'append', '(', 'ip', ')', 'else', ':', 'item', '[', "'private_ips'", ']', '.', 'append', '(', 'ip', ')', '# add any undefined desired keys', 'for', 'key', 'in', 'desired_keys', ':', 'if', 'key', 'not', 'in', 'item', ':', 'item', '[', 'key', ']', '=', 'None', '# remove all the extra key value pairs to provide a brief listing', 'to_del', '=', '[', ']', 'if', 'not', 'full', ':', 'for', 'key', 'in', 'six', '.', 'iterkeys', '(', 'item', ')', ':', '# iterate over a copy of the keys', 'if', 'key', 'not', 'in', 'desired_keys', ':', 'to_del', '.', 'append', '(', 'key', ')', 'for', 'key', 'in', 'to_del', ':', 'del', 'item', '[', 'key', ']', 'if', "'state'", 'in', 'item', ':', 'item', '[', "'state'", ']', '=', 'joyent_node_state', '(', 'item', '[', "'state'", ']', ')', 'return', 'item']
Reformat the returned data from joyent, determine public/private IPs and strip out fields if necessary to provide either full or brief content. :param item: node dictionary :param full: full or brief output :return: dict
['Reformat', 'the', 'returned', 'data', 'from', 'joyent', 'determine', 'public', '/', 'private', 'IPs', 'and', 'strip', 'out', 'fields', 'if', 'necessary', 'to', 'provide', 'either', 'full', 'or', 'brief', 'content', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/joyent.py#L674-L714
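The Salt record above sorts a node's addresses into public and private lists via `salt.utils.cloud.is_public_ip`. Outside of Salt, the standard-library `ipaddress` module can approximate that check; the sketch below is a rough stand-in, not Salt's actual helper, and the sample addresses are made up.

```python
import ipaddress

ips = ["10.0.0.5", "192.168.1.20", "8.8.8.8"]  # sample addresses

public_ips, private_ips = [], []
for ip in ips:
    addr = ipaddress.ip_address(ip)
    # is_global is False for RFC 1918 ranges, loopback, link-local, etc.
    (public_ips if addr.is_global else private_ips).append(ip)

print({"public_ips": public_ips, "private_ips": private_ips})
```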
1,566
ranaroussi/ezibpy
ezibpy/ezibpy.py
ezIBpy.createCashContract
def createCashContract(self, symbol, currency="USD", exchange="IDEALPRO"): """ Used for FX, etc: createCashContract("EUR", currency="USD") """ contract_tuple = (symbol, "CASH", exchange, currency, "", 0.0, "") contract = self.createContract(contract_tuple) return contract
python
def createCashContract(self, symbol, currency="USD", exchange="IDEALPRO"): """ Used for FX, etc: createCashContract("EUR", currency="USD") """ contract_tuple = (symbol, "CASH", exchange, currency, "", 0.0, "") contract = self.createContract(contract_tuple) return contract
['def', 'createCashContract', '(', 'self', ',', 'symbol', ',', 'currency', '=', '"USD"', ',', 'exchange', '=', '"IDEALPRO"', ')', ':', 'contract_tuple', '=', '(', 'symbol', ',', '"CASH"', ',', 'exchange', ',', 'currency', ',', '""', ',', '0.0', ',', '""', ')', 'contract', '=', 'self', '.', 'createContract', '(', 'contract_tuple', ')', 'return', 'contract']
Used for FX, etc: createCashContract("EUR", currency="USD")
['Used', 'for', 'FX', 'etc', ':', 'createCashContract', '(', 'EUR', 'currency', '=', 'USD', ')']
train
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1520-L1526
1,567
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
image_summary
def image_summary(predictions, targets, hparams): """Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions. """ del hparams results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8) gold = tf.cast(targets, tf.uint8) summary1 = tf.summary.image("prediction", results, max_outputs=2) summary2 = tf.summary.image("data", gold, max_outputs=2) summary = tf.summary.merge([summary1, summary2]) return summary, tf.zeros_like(predictions)
python
def image_summary(predictions, targets, hparams): """Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions. """ del hparams results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8) gold = tf.cast(targets, tf.uint8) summary1 = tf.summary.image("prediction", results, max_outputs=2) summary2 = tf.summary.image("data", gold, max_outputs=2) summary = tf.summary.merge([summary1, summary2]) return summary, tf.zeros_like(predictions)
['def', 'image_summary', '(', 'predictions', ',', 'targets', ',', 'hparams', ')', ':', 'del', 'hparams', 'results', '=', 'tf', '.', 'cast', '(', 'tf', '.', 'argmax', '(', 'predictions', ',', 'axis', '=', '-', '1', ')', ',', 'tf', '.', 'uint8', ')', 'gold', '=', 'tf', '.', 'cast', '(', 'targets', ',', 'tf', '.', 'uint8', ')', 'summary1', '=', 'tf', '.', 'summary', '.', 'image', '(', '"prediction"', ',', 'results', ',', 'max_outputs', '=', '2', ')', 'summary2', '=', 'tf', '.', 'summary', '.', 'image', '(', '"data"', ',', 'gold', ',', 'max_outputs', '=', '2', ')', 'summary', '=', 'tf', '.', 'summary', '.', 'merge', '(', '[', 'summary1', ',', 'summary2', ']', ')', 'return', 'summary', ',', 'tf', '.', 'zeros_like', '(', 'predictions', ')']
Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions.
['Reshapes', 'predictions', 'and', 'passes', 'it', 'to', 'tensorboard', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L396-L414
1,568
numenta/htmresearch
projects/combined_sequences/combined_sequences.py
runExperiment4B
def runExperiment4B(dirName): """ This runs the second experiment in the section "Simulations with Pure Temporal Sequences". Here we check accuracy of the L2/L4 networks in classifying the sequences. This experiment averages over many parameter combinations and could take several minutes. """ # Results are put into a pkl file which can be used to generate the plots. # dirName is the absolute path where the pkl file will be placed. resultsName = os.path.join(dirName, "sequence_batch_high_dec_normal_features.pkl") numTrials = 10 featureRange = [10, 50, 100, 200] seqRange = [50] locationRange = [10, 100, 200, 300, 400, 500] runExperimentPool( numSequences=seqRange, numFeatures=featureRange, numLocations=locationRange, numObjects=[0], seqLength=10, nTrials=numTrials, numWorkers=cpu_count()-1, basalPredictedSegmentDecrement=[0.005], resultsName=resultsName)
python
def runExperiment4B(dirName): """ This runs the second experiment in the section "Simulations with Pure Temporal Sequences". Here we check accuracy of the L2/L4 networks in classifying the sequences. This experiment averages over many parameter combinations and could take several minutes. """ # Results are put into a pkl file which can be used to generate the plots. # dirName is the absolute path where the pkl file will be placed. resultsName = os.path.join(dirName, "sequence_batch_high_dec_normal_features.pkl") numTrials = 10 featureRange = [10, 50, 100, 200] seqRange = [50] locationRange = [10, 100, 200, 300, 400, 500] runExperimentPool( numSequences=seqRange, numFeatures=featureRange, numLocations=locationRange, numObjects=[0], seqLength=10, nTrials=numTrials, numWorkers=cpu_count()-1, basalPredictedSegmentDecrement=[0.005], resultsName=resultsName)
['def', 'runExperiment4B', '(', 'dirName', ')', ':', '# Results are put into a pkl file which can be used to generate the plots.', '# dirName is the absolute path where the pkl file will be placed.', 'resultsName', '=', 'os', '.', 'path', '.', 'join', '(', 'dirName', ',', '"sequence_batch_high_dec_normal_features.pkl"', ')', 'numTrials', '=', '10', 'featureRange', '=', '[', '10', ',', '50', ',', '100', ',', '200', ']', 'seqRange', '=', '[', '50', ']', 'locationRange', '=', '[', '10', ',', '100', ',', '200', ',', '300', ',', '400', ',', '500', ']', 'runExperimentPool', '(', 'numSequences', '=', 'seqRange', ',', 'numFeatures', '=', 'featureRange', ',', 'numLocations', '=', 'locationRange', ',', 'numObjects', '=', '[', '0', ']', ',', 'seqLength', '=', '10', ',', 'nTrials', '=', 'numTrials', ',', 'numWorkers', '=', 'cpu_count', '(', ')', '-', '1', ',', 'basalPredictedSegmentDecrement', '=', '[', '0.005', ']', ',', 'resultsName', '=', 'resultsName', ')']
This runs the second experiment in the section "Simulations with Pure Temporal Sequences". Here we check accuracy of the L2/L4 networks in classifying the sequences. This experiment averages over many parameter combinations and could take several minutes.
['This', 'runs', 'the', 'second', 'experiment', 'in', 'the', 'section', 'Simulations', 'with', 'Pure', 'Temporal', 'Sequences', '.', 'Here', 'we', 'check', 'accuracy', 'of', 'the', 'L2', '/', 'L4', 'networks', 'in', 'classifying', 'the', 'sequences', '.', 'This', 'experiment', 'averages', 'over', 'many', 'parameter', 'combinations', 'and', 'could', 'take', 'several', 'minutes', '.']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/combined_sequences/combined_sequences.py#L703-L728
1,569
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py
brocade_arp.hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet
def hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp") arp_entry = ET.SubElement(hide_arp_holder, "arp-entry") arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address") arp_ip_address_key.text = kwargs.pop('arp_ip_address') interfacetype = ET.SubElement(arp_entry, "interfacetype") HundredGigabitEthernet = ET.SubElement(interfacetype, "HundredGigabitEthernet") HundredGigabitEthernet = ET.SubElement(HundredGigabitEthernet, "HundredGigabitEthernet") HundredGigabitEthernet.text = kwargs.pop('HundredGigabitEthernet') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp") arp_entry = ET.SubElement(hide_arp_holder, "arp-entry") arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address") arp_ip_address_key.text = kwargs.pop('arp_ip_address') interfacetype = ET.SubElement(arp_entry, "interfacetype") HundredGigabitEthernet = ET.SubElement(interfacetype, "HundredGigabitEthernet") HundredGigabitEthernet = ET.SubElement(HundredGigabitEthernet, "HundredGigabitEthernet") HundredGigabitEthernet.text = kwargs.pop('HundredGigabitEthernet') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'hide_arp_holder', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"hide-arp-holder"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-arp"', ')', 'arp_entry', '=', 'ET', '.', 'SubElement', '(', 'hide_arp_holder', ',', '"arp-entry"', ')', 'arp_ip_address_key', '=', 'ET', '.', 'SubElement', '(', 'arp_entry', ',', '"arp-ip-address"', ')', 'arp_ip_address_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'arp_ip_address'", ')', 'interfacetype', '=', 'ET', '.', 'SubElement', '(', 'arp_entry', ',', '"interfacetype"', ')', 'HundredGigabitEthernet', '=', 'ET', '.', 'SubElement', '(', 'interfacetype', ',', '"HundredGigabitEthernet"', ')', 'HundredGigabitEthernet', '=', 'ET', '.', 'SubElement', '(', 'HundredGigabitEthernet', ',', '"HundredGigabitEthernet"', ')', 'HundredGigabitEthernet', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'HundredGigabitEthernet'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py#L128-L142
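The auto-generated Brocade helper above builds a nested NETCONF-style payload with `xml.etree.ElementTree`. The standalone sketch below reproduces only that element-building pattern with the standard library; the tag names come from the record, while the address and interface values are hypothetical.

```python
import xml.etree.ElementTree as ET

# Build the same nested structure the generated helper produces.
config = ET.Element("config")
hide_arp_holder = ET.SubElement(
    config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
ET.SubElement(arp_entry, "arp-ip-address").text = "10.0.0.1"   # hypothetical value
interfacetype = ET.SubElement(arp_entry, "interfacetype")
hge = ET.SubElement(interfacetype, "HundredGigabitEthernet")
ET.SubElement(hge, "HundredGigabitEthernet").text = "1/0/1"    # hypothetical value

# Serialize to inspect the payload that would be handed to the callback.
print(ET.tostring(config, encoding="unicode"))
```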
1,570
Geotab/mygeotab-python
mygeotab/api.py
server_call
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters): """Makes a call to an un-authenticated method on a server :param method: The method name. :type method: str :param server: The MyGeotab server. :type server: str :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :type timeout: float :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this. :type verify_ssl: bool :param parameters: Additional parameters to send (for example, search=dict(id='b123') ). :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server. :raise TimeoutException: Raises when the request does not respond after some time. :return: The result from the server. """ if method is None: raise Exception("A method name must be specified") if server is None: raise Exception("A server (eg. my3.geotab.com) must be specified") parameters = process_parameters(parameters) return _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl)
python
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters): """Makes a call to an un-authenticated method on a server :param method: The method name. :type method: str :param server: The MyGeotab server. :type server: str :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :type timeout: float :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this. :type verify_ssl: bool :param parameters: Additional parameters to send (for example, search=dict(id='b123') ). :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server. :raise TimeoutException: Raises when the request does not respond after some time. :return: The result from the server. """ if method is None: raise Exception("A method name must be specified") if server is None: raise Exception("A server (eg. my3.geotab.com) must be specified") parameters = process_parameters(parameters) return _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl)
['def', 'server_call', '(', 'method', ',', 'server', ',', 'timeout', '=', 'DEFAULT_TIMEOUT', ',', 'verify_ssl', '=', 'True', ',', '*', '*', 'parameters', ')', ':', 'if', 'method', 'is', 'None', ':', 'raise', 'Exception', '(', '"A method name must be specified"', ')', 'if', 'server', 'is', 'None', ':', 'raise', 'Exception', '(', '"A server (eg. my3.geotab.com) must be specified"', ')', 'parameters', '=', 'process_parameters', '(', 'parameters', ')', 'return', '_query', '(', 'server', ',', 'method', ',', 'parameters', ',', 'timeout', '=', 'timeout', ',', 'verify_ssl', '=', 'verify_ssl', ')']
Makes a call to an un-authenticated method on a server :param method: The method name. :type method: str :param server: The MyGeotab server. :type server: str :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :type timeout: float :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this. :type verify_ssl: bool :param parameters: Additional parameters to send (for example, search=dict(id='b123') ). :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server. :raise TimeoutException: Raises when the request does not respond after some time. :return: The result from the server.
['Makes', 'a', 'call', 'to', 'an', 'un', '-', 'authenticated', 'method', 'on', 'a', 'server']
train
https://github.com/Geotab/mygeotab-python/blob/baa678e7df90bdd15f5dc55c1374b5c048791a94/mygeotab/api.py#L336-L357
1,571
MacHu-GWU/dataIO-project
dataIO/zzz_manual_install.py
find_venv_DST
def find_venv_DST(): """Find where this package should be installed to in this virtualenv. For example: ``/path-to-venv/lib/python2.7/site-packages/package-name`` """ dir_path = os.path.dirname(SRC) if SYS_NAME == "Windows": DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: python_version = find_linux_venv_py_version() DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME) return DST
python
def find_venv_DST(): """Find where this package should be installed to in this virtualenv. For example: ``/path-to-venv/lib/python2.7/site-packages/package-name`` """ dir_path = os.path.dirname(SRC) if SYS_NAME == "Windows": DST = os.path.join(dir_path, "Lib", "site-packages", PKG_NAME) elif SYS_NAME in ["Darwin", "Linux"]: python_version = find_linux_venv_py_version() DST = os.path.join(dir_path, "lib", python_version, "site-packages", PKG_NAME) return DST
['def', 'find_venv_DST', '(', ')', ':', 'dir_path', '=', 'os', '.', 'path', '.', 'dirname', '(', 'SRC', ')', 'if', 'SYS_NAME', '==', '"Windows"', ':', 'DST', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', '"Lib"', ',', '"site-packages"', ',', 'PKG_NAME', ')', 'elif', 'SYS_NAME', 'in', '[', '"Darwin"', ',', '"Linux"', ']', ':', 'python_version', '=', 'find_linux_venv_py_version', '(', ')', 'DST', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', '"lib"', ',', 'python_version', ',', '"site-packages"', ',', 'PKG_NAME', ')', 'return', 'DST']
Find where this package should be installed to in this virtualenv. For example: ``/path-to-venv/lib/python2.7/site-packages/package-name``
['Find', 'where', 'this', 'package', 'should', 'be', 'installed', 'to', 'in', 'this', 'virtualenv', '.']
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/zzz_manual_install.py#L87-L100
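The `find_venv_DST` record above resolves a package's install destination inside a virtualenv from the platform name. A rough, standard-library-only sketch of the same idea follows; the package name and virtualenv root are placeholder assumptions, and real layouts can differ (for example Debian's `dist-packages`).

```python
import os
import platform
import sys

PKG_NAME = "my_package"       # hypothetical package name
VENV_ROOT = "/path/to/venv"   # hypothetical virtualenv root


def guess_site_packages(venv_root, pkg_name):
    """Guess where a package would live inside a virtualenv (sketch only)."""
    if platform.system() == "Windows":
        return os.path.join(venv_root, "Lib", "site-packages", pkg_name)
    # On Linux/macOS the path embeds the interpreter version, e.g. python3.11.
    py_dir = "python%s.%s" % (sys.version_info.major, sys.version_info.minor)
    return os.path.join(venv_root, "lib", py_dir, "site-packages", pkg_name)


print(guess_site_packages(VENV_ROOT, PKG_NAME))
```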
1,572
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/mavproxy.py
unload_module
def unload_module(modname): '''unload a module''' for (m,pm) in mpstate.modules: if m.name == modname: if hasattr(m, 'unload'): m.unload() mpstate.modules.remove((m,pm)) print("Unloaded module %s" % modname) return True print("Unable to find module %s" % modname) return False
python
def unload_module(modname): '''unload a module''' for (m,pm) in mpstate.modules: if m.name == modname: if hasattr(m, 'unload'): m.unload() mpstate.modules.remove((m,pm)) print("Unloaded module %s" % modname) return True print("Unable to find module %s" % modname) return False
['def', 'unload_module', '(', 'modname', ')', ':', 'for', '(', 'm', ',', 'pm', ')', 'in', 'mpstate', '.', 'modules', ':', 'if', 'm', '.', 'name', '==', 'modname', ':', 'if', 'hasattr', '(', 'm', ',', "'unload'", ')', ':', 'm', '.', 'unload', '(', ')', 'mpstate', '.', 'modules', '.', 'remove', '(', '(', 'm', ',', 'pm', ')', ')', 'print', '(', '"Unloaded module %s"', '%', 'modname', ')', 'return', 'True', 'print', '(', '"Unable to find module %s"', '%', 'modname', ')', 'return', 'False']
unload a module
['unload', 'a', 'module']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/mavproxy.py#L313-L323
1,573
mushkevych/scheduler
synergy/mq/flopsy.py
PublishersPool._close
def _close(self, name, suppress_logging): """ closes one particular pool and all its amqp amqp connections """ try: pool_names = list(self.pools) if name in pool_names: self.pools[name].close() del self.pools[name] except Exception as e: self.logger.error('Exception on closing Flopsy Pool for {0}: {1}'.format(name, e), exc_info=not suppress_logging)
python
def _close(self, name, suppress_logging): """ closes one particular pool and all its amqp amqp connections """ try: pool_names = list(self.pools) if name in pool_names: self.pools[name].close() del self.pools[name] except Exception as e: self.logger.error('Exception on closing Flopsy Pool for {0}: {1}'.format(name, e), exc_info=not suppress_logging)
['def', '_close', '(', 'self', ',', 'name', ',', 'suppress_logging', ')', ':', 'try', ':', 'pool_names', '=', 'list', '(', 'self', '.', 'pools', ')', 'if', 'name', 'in', 'pool_names', ':', 'self', '.', 'pools', '[', 'name', ']', '.', 'close', '(', ')', 'del', 'self', '.', 'pools', '[', 'name', ']', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'logger', '.', 'error', '(', "'Exception on closing Flopsy Pool for {0}: {1}'", '.', 'format', '(', 'name', ',', 'e', ')', ',', 'exc_info', '=', 'not', 'suppress_logging', ')']
closes one particular pool and all its amqp connections
['closes', 'one', 'particular', 'pool', 'and', 'all', 'its', 'amqp', 'connections']
train
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/mq/flopsy.py#L263-L272
1,574
prompt-toolkit/pyvim
pyvim/layout.py
EditorLayout._create_window_frame
def _create_window_frame(self, editor_buffer): """ Create a Window for the buffer, with underneat a status bar. """ @Condition def wrap_lines(): return self.editor.wrap_lines window = Window( self._create_buffer_control(editor_buffer), allow_scroll_beyond_bottom=True, scroll_offsets=ScrollOffsets( left=0, right=0, top=(lambda: self.editor.scroll_offset), bottom=(lambda: self.editor.scroll_offset)), wrap_lines=wrap_lines, left_margins=[ConditionalMargin( margin=NumberedMargin( display_tildes=True, relative=Condition(lambda: self.editor.relative_number)), filter=Condition(lambda: self.editor.show_line_numbers))], cursorline=Condition(lambda: self.editor.cursorline), cursorcolumn=Condition(lambda: self.editor.cursorcolumn), colorcolumns=( lambda: [ColorColumn(pos) for pos in self.editor.colorcolumn]), ignore_content_width=True, ignore_content_height=True, get_line_prefix=partial(self._get_line_prefix, editor_buffer.buffer)) return HSplit([ window, VSplit([ WindowStatusBar(self.editor, editor_buffer), WindowStatusBarRuler(self.editor, window, editor_buffer.buffer), ], width=Dimension()), # Ignore actual status bar width. ]), window
python
def _create_window_frame(self, editor_buffer): """ Create a Window for the buffer, with underneat a status bar. """ @Condition def wrap_lines(): return self.editor.wrap_lines window = Window( self._create_buffer_control(editor_buffer), allow_scroll_beyond_bottom=True, scroll_offsets=ScrollOffsets( left=0, right=0, top=(lambda: self.editor.scroll_offset), bottom=(lambda: self.editor.scroll_offset)), wrap_lines=wrap_lines, left_margins=[ConditionalMargin( margin=NumberedMargin( display_tildes=True, relative=Condition(lambda: self.editor.relative_number)), filter=Condition(lambda: self.editor.show_line_numbers))], cursorline=Condition(lambda: self.editor.cursorline), cursorcolumn=Condition(lambda: self.editor.cursorcolumn), colorcolumns=( lambda: [ColorColumn(pos) for pos in self.editor.colorcolumn]), ignore_content_width=True, ignore_content_height=True, get_line_prefix=partial(self._get_line_prefix, editor_buffer.buffer)) return HSplit([ window, VSplit([ WindowStatusBar(self.editor, editor_buffer), WindowStatusBarRuler(self.editor, window, editor_buffer.buffer), ], width=Dimension()), # Ignore actual status bar width. ]), window
['def', '_create_window_frame', '(', 'self', ',', 'editor_buffer', ')', ':', '@', 'Condition', 'def', 'wrap_lines', '(', ')', ':', 'return', 'self', '.', 'editor', '.', 'wrap_lines', 'window', '=', 'Window', '(', 'self', '.', '_create_buffer_control', '(', 'editor_buffer', ')', ',', 'allow_scroll_beyond_bottom', '=', 'True', ',', 'scroll_offsets', '=', 'ScrollOffsets', '(', 'left', '=', '0', ',', 'right', '=', '0', ',', 'top', '=', '(', 'lambda', ':', 'self', '.', 'editor', '.', 'scroll_offset', ')', ',', 'bottom', '=', '(', 'lambda', ':', 'self', '.', 'editor', '.', 'scroll_offset', ')', ')', ',', 'wrap_lines', '=', 'wrap_lines', ',', 'left_margins', '=', '[', 'ConditionalMargin', '(', 'margin', '=', 'NumberedMargin', '(', 'display_tildes', '=', 'True', ',', 'relative', '=', 'Condition', '(', 'lambda', ':', 'self', '.', 'editor', '.', 'relative_number', ')', ')', ',', 'filter', '=', 'Condition', '(', 'lambda', ':', 'self', '.', 'editor', '.', 'show_line_numbers', ')', ')', ']', ',', 'cursorline', '=', 'Condition', '(', 'lambda', ':', 'self', '.', 'editor', '.', 'cursorline', ')', ',', 'cursorcolumn', '=', 'Condition', '(', 'lambda', ':', 'self', '.', 'editor', '.', 'cursorcolumn', ')', ',', 'colorcolumns', '=', '(', 'lambda', ':', '[', 'ColorColumn', '(', 'pos', ')', 'for', 'pos', 'in', 'self', '.', 'editor', '.', 'colorcolumn', ']', ')', ',', 'ignore_content_width', '=', 'True', ',', 'ignore_content_height', '=', 'True', ',', 'get_line_prefix', '=', 'partial', '(', 'self', '.', '_get_line_prefix', ',', 'editor_buffer', '.', 'buffer', ')', ')', 'return', 'HSplit', '(', '[', 'window', ',', 'VSplit', '(', '[', 'WindowStatusBar', '(', 'self', '.', 'editor', ',', 'editor_buffer', ')', ',', 'WindowStatusBarRuler', '(', 'self', '.', 'editor', ',', 'window', ',', 'editor_buffer', '.', 'buffer', ')', ',', ']', ',', 'width', '=', 'Dimension', '(', ')', ')', ',', '# Ignore actual status bar width.', ']', ')', ',', 'window']
Create a Window for the buffer, with a status bar underneath.
['Create', 'a', 'Window', 'for', 'the', 'buffer', 'with', 'a', 'status', 'bar', 'underneath', '.']
train
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/layout.py#L529-L564
1,575
Kortemme-Lab/klab
klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py
_compare_mp_alias
def _compare_mp_alias(br_i, br_j, analysis_set, analysis_set_subdir, unique_ajps, verbose): """ Alias for instance method that allows the method to be called in a multiprocessing pool. Needed as multiprocessing does not otherwise work on object instance methods. """ return br_i.compare(br_j, analysis_set, analysis_set_subdir, unique_ajps, verbose = verbose, compile_pdf = verbose)
python
def _compare_mp_alias(br_i, br_j, analysis_set, analysis_set_subdir, unique_ajps, verbose): """ Alias for instance method that allows the method to be called in a multiprocessing pool. Needed as multiprocessing does not otherwise work on object instance methods. """ return br_i.compare(br_j, analysis_set, analysis_set_subdir, unique_ajps, verbose = verbose, compile_pdf = verbose)
['def', '_compare_mp_alias', '(', 'br_i', ',', 'br_j', ',', 'analysis_set', ',', 'analysis_set_subdir', ',', 'unique_ajps', ',', 'verbose', ')', ':', 'return', 'br_i', '.', 'compare', '(', 'br_j', ',', 'analysis_set', ',', 'analysis_set_subdir', ',', 'unique_ajps', ',', 'verbose', '=', 'verbose', ',', 'compile_pdf', '=', 'verbose', ')']
Alias for instance method that allows the method to be called in a multiprocessing pool. Needed as multiprocessing does not otherwise work on object instance methods.
['Alias', 'for', 'instance', 'method', 'that', 'allows', 'the', 'method', 'to', 'be', 'called', 'in', 'a', 'multiprocessing', 'pool', '.', 'Needed', 'as', 'multiprocessing', 'does', 'not', 'otherwise', 'work', 'on', 'object', 'instance', 'methods', '.']
train
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L2244-L2250
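The `_compare_mp_alias` record above documents a common workaround: `multiprocessing` workers need a picklable, module-level callable, so a plain function forwards to the bound instance method. A generic, self-contained sketch of that pattern (with a made-up `Report` class, not klab's actual objects) is shown below.

```python
from multiprocessing import Pool


class Report(object):
    """Toy stand-in for the benchmark-run objects being compared."""

    def __init__(self, name):
        self.name = name

    def compare(self, other):
        return "%s vs %s" % (self.name, other.name)


def _compare_mp_alias(report_a, report_b):
    # Module-level function: easily pickled and dispatched to pool workers,
    # while the real work still happens in the instance method.
    return report_a.compare(report_b)


if __name__ == "__main__":
    reports = [Report("a"), Report("b"), Report("c")]
    pairs = [(reports[0], other) for other in reports[1:]]
    with Pool(processes=2) as pool:
        print(pool.starmap(_compare_mp_alias, pairs))
```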
1,576
bokeh/bokeh
bokeh/client/session.py
push_session
def push_session(document, session_id=None, url='default', io_loop=None): ''' Create a session by pushing the given document to the server, overwriting any existing server-side document. ``session.document`` in the returned session will be your supplied document. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``push_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: document : (bokeh.document.Document) The document to be pushed and set as session.document session_id : (string, optional) The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop : (tornado.ioloop.IOLoop, optional) The IOLoop to use for the websocket Returns: ClientSession A new ClientSession connected to the server ''' coords = _SessionCoordinates(session_id=session_id, url=url) session = ClientSession(session_id=coords.session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop) session.push(document) return session
python
def push_session(document, session_id=None, url='default', io_loop=None): ''' Create a session by pushing the given document to the server, overwriting any existing server-side document. ``session.document`` in the returned session will be your supplied document. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``push_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: document : (bokeh.document.Document) The document to be pushed and set as session.document session_id : (string, optional) The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop : (tornado.ioloop.IOLoop, optional) The IOLoop to use for the websocket Returns: ClientSession A new ClientSession connected to the server ''' coords = _SessionCoordinates(session_id=session_id, url=url) session = ClientSession(session_id=coords.session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop) session.push(document) return session
['def', 'push_session', '(', 'document', ',', 'session_id', '=', 'None', ',', 'url', '=', "'default'", ',', 'io_loop', '=', 'None', ')', ':', 'coords', '=', '_SessionCoordinates', '(', 'session_id', '=', 'session_id', ',', 'url', '=', 'url', ')', 'session', '=', 'ClientSession', '(', 'session_id', '=', 'coords', '.', 'session_id', ',', 'websocket_url', '=', 'websocket_url_for_server_url', '(', 'coords', '.', 'url', ')', ',', 'io_loop', '=', 'io_loop', ')', 'session', '.', 'push', '(', 'document', ')', 'return', 'session']
Create a session by pushing the given document to the server, overwriting any existing server-side document. ``session.document`` in the returned session will be your supplied document. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``push_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: document : (bokeh.document.Document) The document to be pushed and set as session.document session_id : (string, optional) The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop : (tornado.ioloop.IOLoop, optional) The IOLoop to use for the websocket Returns: ClientSession A new ClientSession connected to the server
['Create', 'a', 'session', 'by', 'pushing', 'the', 'given', 'document', 'to', 'the', 'server', 'overwriting', 'any', 'existing', 'server', '-', 'side', 'document', '.']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/client/session.py#L127-L169
1,577
has2k1/plotnine
plotnine/geoms/geom_path.py
_draw_segments
def _draw_segments(data, ax, **params): """ Draw independent line segments between all the points """ color = to_rgba(data['color'], data['alpha']) # All we do is line-up all the points in a group # into segments, all in a single list. # Along the way the other parameters are put in # sequences accordingly indices = [] # for attributes of starting point of each segment segments = [] for _, df in data.groupby('group'): idx = df.index indices.extend(idx[:-1]) # One line from two points x = data['x'].iloc[idx] y = data['y'].iloc[idx] segments.append(make_line_segments(x, y, ispath=True)) segments = np.vstack(segments) if color is None: edgecolor = color else: edgecolor = [color[i] for i in indices] linewidth = data.loc[indices, 'size'] linestyle = data.loc[indices, 'linetype'] coll = mcoll.LineCollection(segments, edgecolor=edgecolor, linewidth=linewidth, linestyle=linestyle, zorder=params['zorder']) ax.add_collection(coll)
python
def _draw_segments(data, ax, **params): """ Draw independent line segments between all the points """ color = to_rgba(data['color'], data['alpha']) # All we do is line-up all the points in a group # into segments, all in a single list. # Along the way the other parameters are put in # sequences accordingly indices = [] # for attributes of starting point of each segment segments = [] for _, df in data.groupby('group'): idx = df.index indices.extend(idx[:-1]) # One line from two points x = data['x'].iloc[idx] y = data['y'].iloc[idx] segments.append(make_line_segments(x, y, ispath=True)) segments = np.vstack(segments) if color is None: edgecolor = color else: edgecolor = [color[i] for i in indices] linewidth = data.loc[indices, 'size'] linestyle = data.loc[indices, 'linetype'] coll = mcoll.LineCollection(segments, edgecolor=edgecolor, linewidth=linewidth, linestyle=linestyle, zorder=params['zorder']) ax.add_collection(coll)
['def', '_draw_segments', '(', 'data', ',', 'ax', ',', '*', '*', 'params', ')', ':', 'color', '=', 'to_rgba', '(', 'data', '[', "'color'", ']', ',', 'data', '[', "'alpha'", ']', ')', '# All we do is line-up all the points in a group', '# into segments, all in a single list.', '# Along the way the other parameters are put in', '# sequences accordingly', 'indices', '=', '[', ']', '# for attributes of starting point of each segment', 'segments', '=', '[', ']', 'for', '_', ',', 'df', 'in', 'data', '.', 'groupby', '(', "'group'", ')', ':', 'idx', '=', 'df', '.', 'index', 'indices', '.', 'extend', '(', 'idx', '[', ':', '-', '1', ']', ')', '# One line from two points', 'x', '=', 'data', '[', "'x'", ']', '.', 'iloc', '[', 'idx', ']', 'y', '=', 'data', '[', "'y'", ']', '.', 'iloc', '[', 'idx', ']', 'segments', '.', 'append', '(', 'make_line_segments', '(', 'x', ',', 'y', ',', 'ispath', '=', 'True', ')', ')', 'segments', '=', 'np', '.', 'vstack', '(', 'segments', ')', 'if', 'color', 'is', 'None', ':', 'edgecolor', '=', 'color', 'else', ':', 'edgecolor', '=', '[', 'color', '[', 'i', ']', 'for', 'i', 'in', 'indices', ']', 'linewidth', '=', 'data', '.', 'loc', '[', 'indices', ',', "'size'", ']', 'linestyle', '=', 'data', '.', 'loc', '[', 'indices', ',', "'linetype'", ']', 'coll', '=', 'mcoll', '.', 'LineCollection', '(', 'segments', ',', 'edgecolor', '=', 'edgecolor', ',', 'linewidth', '=', 'linewidth', ',', 'linestyle', '=', 'linestyle', ',', 'zorder', '=', 'params', '[', "'zorder'", ']', ')', 'ax', '.', 'add_collection', '(', 'coll', ')']
Draw independent line segments between all the points
['Draw', 'independent', 'line', 'segments', 'between', 'all', 'the', 'points']
train
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom_path.py#L341-L375
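The plotnine `_draw_segments` record above ultimately hands per-segment styling to Matplotlib's `LineCollection`. Below is a minimal, self-contained Matplotlib sketch of that mechanism; the coordinates, colors, and widths are invented for illustration.

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

# Each segment is a (2, 2) array of (x, y) endpoints, one per line piece.
points = np.array([[0, 0], [1, 2], [2, 1], [3, 3]], dtype=float)
segments = np.stack([points[:-1], points[1:]], axis=1)

# Per-segment attributes, analogous to edgecolor/linewidth in the record.
colors = ["red", "green", "blue"]
widths = [1.0, 2.0, 3.0]

fig, ax = plt.subplots()
ax.add_collection(LineCollection(segments, colors=colors, linewidths=widths))
ax.autoscale()   # collections do not update the data limits automatically
plt.show()
```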
1,578
sernst/cauldron
cauldron/cli/server/run.py
get_running_step_changes
def get_running_step_changes(write: bool = False) -> list: """...""" project = cd.project.get_internal_project() running_steps = list(filter( lambda step: step.is_running, project.steps )) def get_changes(step): step_data = writing.step_writer.serialize(step) if write: writing.save(project, step_data.file_writes) return dict( name=step.definition.name, action='updated', step=step_data._asdict(), written=write ) return [get_changes(step) for step in running_steps]
python
def get_running_step_changes(write: bool = False) -> list: """...""" project = cd.project.get_internal_project() running_steps = list(filter( lambda step: step.is_running, project.steps )) def get_changes(step): step_data = writing.step_writer.serialize(step) if write: writing.save(project, step_data.file_writes) return dict( name=step.definition.name, action='updated', step=step_data._asdict(), written=write ) return [get_changes(step) for step in running_steps]
['def', 'get_running_step_changes', '(', 'write', ':', 'bool', '=', 'False', ')', '->', 'list', ':', 'project', '=', 'cd', '.', 'project', '.', 'get_internal_project', '(', ')', 'running_steps', '=', 'list', '(', 'filter', '(', 'lambda', 'step', ':', 'step', '.', 'is_running', ',', 'project', '.', 'steps', ')', ')', 'def', 'get_changes', '(', 'step', ')', ':', 'step_data', '=', 'writing', '.', 'step_writer', '.', 'serialize', '(', 'step', ')', 'if', 'write', ':', 'writing', '.', 'save', '(', 'project', ',', 'step_data', '.', 'file_writes', ')', 'return', 'dict', '(', 'name', '=', 'step', '.', 'definition', '.', 'name', ',', 'action', '=', "'updated'", ',', 'step', '=', 'step_data', '.', '_asdict', '(', ')', ',', 'written', '=', 'write', ')', 'return', '[', 'get_changes', '(', 'step', ')', 'for', 'step', 'in', 'running_steps', ']']
...
['...']
train
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/server/run.py#L53-L75
1,579
ska-sa/purr
Purr/Plugins/local_pychart/text_box.py
T.add_arrow
def add_arrow(self, tipLoc, tail=None, arrow=arrow.default): """This method adds a straight arrow that points to @var{TIPLOC}, which is a tuple of integers. @var{TAIL} specifies the starting point of the arrow. It is either None or a string consisting of the following letters: 'l', 'c', 'r', 't', 'm,', and 'b'. Letters 'l', 'c', or 'r' means to start the arrow from the left, center, or right of the text box, respectively. Letters 't', 'm', or 'b' means to start the arrow from the top, middle or bottom of the text box. For example, when @samp{tail = 'tc'} then arrow is drawn from top-center point of the text box. ARROW specifies the style of the arrow. <<arrow>>. """ self._arrows.append((tipLoc, tail, arrow))
python
def add_arrow(self, tipLoc, tail=None, arrow=arrow.default): """This method adds a straight arrow that points to @var{TIPLOC}, which is a tuple of integers. @var{TAIL} specifies the starting point of the arrow. It is either None or a string consisting of the following letters: 'l', 'c', 'r', 't', 'm,', and 'b'. Letters 'l', 'c', or 'r' means to start the arrow from the left, center, or right of the text box, respectively. Letters 't', 'm', or 'b' means to start the arrow from the top, middle or bottom of the text box. For example, when @samp{tail = 'tc'} then arrow is drawn from top-center point of the text box. ARROW specifies the style of the arrow. <<arrow>>. """ self._arrows.append((tipLoc, tail, arrow))
['def', 'add_arrow', '(', 'self', ',', 'tipLoc', ',', 'tail', '=', 'None', ',', 'arrow', '=', 'arrow', '.', 'default', ')', ':', 'self', '.', '_arrows', '.', 'append', '(', '(', 'tipLoc', ',', 'tail', ',', 'arrow', ')', ')']
This method adds a straight arrow that points to @var{TIPLOC}, which is a tuple of integers. @var{TAIL} specifies the starting point of the arrow. It is either None or a string consisting of the following letters: 'l', 'c', 'r', 't', 'm,', and 'b'. Letters 'l', 'c', or 'r' means to start the arrow from the left, center, or right of the text box, respectively. Letters 't', 'm', or 'b' means to start the arrow from the top, middle or bottom of the text box. For example, when @samp{tail = 'tc'} then arrow is drawn from top-center point of the text box. ARROW specifies the style of the arrow. <<arrow>>.
['This', 'method', 'adds', 'a', 'straight', 'arrow', 'that', 'points', 'to']
train
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/text_box.py#L86-L99
1,580
odlgroup/odl
odl/contrib/mrc/uncompr_bin.py
FileWriterRawBinaryWithHeader.write_header
def write_header(self): """Write `header` to `file`. See Also -------- write_data """ for properties in self.header.values(): value = properties['value'] offset_bytes = int(properties['offset']) self.file.seek(offset_bytes) value.tofile(self.file)
python
def write_header(self): """Write `header` to `file`. See Also -------- write_data """ for properties in self.header.values(): value = properties['value'] offset_bytes = int(properties['offset']) self.file.seek(offset_bytes) value.tofile(self.file)
['def', 'write_header', '(', 'self', ')', ':', 'for', 'properties', 'in', 'self', '.', 'header', '.', 'values', '(', ')', ':', 'value', '=', 'properties', '[', "'value'", ']', 'offset_bytes', '=', 'int', '(', 'properties', '[', "'offset'", ']', ')', 'self', '.', 'file', '.', 'seek', '(', 'offset_bytes', ')', 'value', '.', 'tofile', '(', 'self', '.', 'file', ')']
Write `header` to `file`. See Also -------- write_data
['Write', 'header', 'to', 'file', '.']
train
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/uncompr_bin.py#L642-L653
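The ODL `write_header` record above seeks to each field's byte offset and writes a small NumPy array in place. The sketch below reproduces that seek-and-`tofile` pattern against a temporary file, using a made-up two-field header layout rather than the real MRC header spec.

```python
import tempfile
import numpy as np

# Hypothetical header layout: {field: {'offset': byte offset, 'value': array}}.
header = {
    'nx':   {'offset': 0, 'value': np.array([256], dtype='<i4')},
    'mode': {'offset': 4, 'value': np.array([2], dtype='<i4')},
}

with tempfile.TemporaryFile() as f:
    f.write(b'\x00' * 8)                  # pre-size the header region
    for props in header.values():
        f.seek(int(props['offset']))      # jump to the field's fixed offset
        props['value'].tofile(f)          # write the raw bytes in place

    f.seek(0)
    print(np.frombuffer(f.read(), dtype='<i4'))   # -> [256 2]
```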
1,581
buildbot/buildbot
worker/buildbot_worker/compat.py
bytes2NativeString
def bytes2NativeString(x, encoding='utf-8'): """ Convert C{bytes} to a native C{str}. On Python 3 and higher, str and bytes are not equivalent. In this case, decode the bytes, and return a native string. On Python 2 and lower, str and bytes are equivalent. In this case, just just return the native string. @param x: a string of type C{bytes} @param encoding: an optional codec, default: 'utf-8' @return: a string of type C{str} """ if isinstance(x, bytes) and str != bytes: return x.decode(encoding) return x
python
def bytes2NativeString(x, encoding='utf-8'): """ Convert C{bytes} to a native C{str}. On Python 3 and higher, str and bytes are not equivalent. In this case, decode the bytes, and return a native string. On Python 2 and lower, str and bytes are equivalent. In this case, just just return the native string. @param x: a string of type C{bytes} @param encoding: an optional codec, default: 'utf-8' @return: a string of type C{str} """ if isinstance(x, bytes) and str != bytes: return x.decode(encoding) return x
['def', 'bytes2NativeString', '(', 'x', ',', 'encoding', '=', "'utf-8'", ')', ':', 'if', 'isinstance', '(', 'x', ',', 'bytes', ')', 'and', 'str', '!=', 'bytes', ':', 'return', 'x', '.', 'decode', '(', 'encoding', ')', 'return', 'x']
Convert C{bytes} to a native C{str}. On Python 3 and higher, str and bytes are not equivalent. In this case, decode the bytes, and return a native string. On Python 2 and lower, str and bytes are equivalent. In this case, just return the native string. @param x: a string of type C{bytes} @param encoding: an optional codec, default: 'utf-8' @return: a string of type C{str}
['Convert', 'C', '{', 'bytes', '}', 'to', 'a', 'native', 'C', '{', 'str', '}', '.']
train
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/worker/buildbot_worker/compat.py#L38-L56
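The buildbot compatibility helper above is small enough to exercise directly. Here is a self-contained sketch with the function body mirrored from the record plus a couple of illustrative calls; on Python 3 the bytes branch always applies, since `str != bytes`.

```python
def bytes2NativeString(x, encoding='utf-8'):
    """Return a native str for bytes input; pass anything else through unchanged."""
    if isinstance(x, bytes) and str != bytes:
        return x.decode(encoding)
    return x


print(bytes2NativeString(b'worker-01'))       # -> 'worker-01'
print(bytes2NativeString('already native'))   # -> 'already native'
```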
1,582
frostming/atoml
atoml/decoder.py
Decoder.parse
def parse(self, data=None, table_name=None): """Parse the lines from index i :param data: optional, store the parsed result to it when specified :param table_name: when inside a table array, it is the table name """ temp = self.dict_() sub_table = None is_array = False line = '' while True: line = self._readline() if not line: self._store_table(sub_table, temp, is_array, data=data) break # EOF if BLANK_RE.match(line): continue if TABLE_RE.match(line): next_table = self.split_string( TABLE_RE.match(line).group(1), '.', False) if table_name and not contains_list(next_table, table_name): self._store_table(sub_table, temp, is_array, data=data) break table = cut_list(next_table, table_name) if sub_table == table: raise TomlDecodeError(self.lineno, 'Duplicate table name' 'in origin: %r' % sub_table) else: # different table name self._store_table(sub_table, temp, is_array, data=data) sub_table = table is_array = False elif TABLE_ARRAY_RE.match(line): next_table = self.split_string( TABLE_ARRAY_RE.match(line).group(1), '.', False) if table_name and not contains_list(next_table, table_name): # Out of current loop # write current data dict to table dict self._store_table(sub_table, temp, is_array, data=data) break table = cut_list(next_table, table_name) if sub_table == table and not is_array: raise TomlDecodeError(self.lineno, 'Duplicate name of ' 'table and array of table: %r' % sub_table) else: # Begin a nested loop # Write any temp data to table dict self._store_table(sub_table, temp, is_array, data=data) sub_table = table is_array = True self.parse(temp, next_table) elif KEY_RE.match(line): m = KEY_RE.match(line) keys = self.split_string(m.group(1), '.') value = self.converter.convert(line[m.end():]) if value is None: raise TomlDecodeError(self.lineno, 'Value is missing') self._store_table(keys[:-1], {keys[-1]: value}, data=temp) else: raise TomlDecodeError(self.lineno, 'Pattern is not recognized: %r' % line) # Rollback to the last line for next parse # This will do nothing if EOF is hit self.instream.seek(self.instream.tell() - len(line)) self.lineno -= 1
python
def parse(self, data=None, table_name=None): """Parse the lines from index i :param data: optional, store the parsed result to it when specified :param table_name: when inside a table array, it is the table name """ temp = self.dict_() sub_table = None is_array = False line = '' while True: line = self._readline() if not line: self._store_table(sub_table, temp, is_array, data=data) break # EOF if BLANK_RE.match(line): continue if TABLE_RE.match(line): next_table = self.split_string( TABLE_RE.match(line).group(1), '.', False) if table_name and not contains_list(next_table, table_name): self._store_table(sub_table, temp, is_array, data=data) break table = cut_list(next_table, table_name) if sub_table == table: raise TomlDecodeError(self.lineno, 'Duplicate table name' 'in origin: %r' % sub_table) else: # different table name self._store_table(sub_table, temp, is_array, data=data) sub_table = table is_array = False elif TABLE_ARRAY_RE.match(line): next_table = self.split_string( TABLE_ARRAY_RE.match(line).group(1), '.', False) if table_name and not contains_list(next_table, table_name): # Out of current loop # write current data dict to table dict self._store_table(sub_table, temp, is_array, data=data) break table = cut_list(next_table, table_name) if sub_table == table and not is_array: raise TomlDecodeError(self.lineno, 'Duplicate name of ' 'table and array of table: %r' % sub_table) else: # Begin a nested loop # Write any temp data to table dict self._store_table(sub_table, temp, is_array, data=data) sub_table = table is_array = True self.parse(temp, next_table) elif KEY_RE.match(line): m = KEY_RE.match(line) keys = self.split_string(m.group(1), '.') value = self.converter.convert(line[m.end():]) if value is None: raise TomlDecodeError(self.lineno, 'Value is missing') self._store_table(keys[:-1], {keys[-1]: value}, data=temp) else: raise TomlDecodeError(self.lineno, 'Pattern is not recognized: %r' % line) # Rollback to the last line for next parse # This will do nothing if EOF is hit self.instream.seek(self.instream.tell() - len(line)) self.lineno -= 1
['def', 'parse', '(', 'self', ',', 'data', '=', 'None', ',', 'table_name', '=', 'None', ')', ':', 'temp', '=', 'self', '.', 'dict_', '(', ')', 'sub_table', '=', 'None', 'is_array', '=', 'False', 'line', '=', "''", 'while', 'True', ':', 'line', '=', 'self', '.', '_readline', '(', ')', 'if', 'not', 'line', ':', 'self', '.', '_store_table', '(', 'sub_table', ',', 'temp', ',', 'is_array', ',', 'data', '=', 'data', ')', 'break', '# EOF', 'if', 'BLANK_RE', '.', 'match', '(', 'line', ')', ':', 'continue', 'if', 'TABLE_RE', '.', 'match', '(', 'line', ')', ':', 'next_table', '=', 'self', '.', 'split_string', '(', 'TABLE_RE', '.', 'match', '(', 'line', ')', '.', 'group', '(', '1', ')', ',', "'.'", ',', 'False', ')', 'if', 'table_name', 'and', 'not', 'contains_list', '(', 'next_table', ',', 'table_name', ')', ':', 'self', '.', '_store_table', '(', 'sub_table', ',', 'temp', ',', 'is_array', ',', 'data', '=', 'data', ')', 'break', 'table', '=', 'cut_list', '(', 'next_table', ',', 'table_name', ')', 'if', 'sub_table', '==', 'table', ':', 'raise', 'TomlDecodeError', '(', 'self', '.', 'lineno', ',', "'Duplicate table name'", "'in origin: %r'", '%', 'sub_table', ')', 'else', ':', '# different table name', 'self', '.', '_store_table', '(', 'sub_table', ',', 'temp', ',', 'is_array', ',', 'data', '=', 'data', ')', 'sub_table', '=', 'table', 'is_array', '=', 'False', 'elif', 'TABLE_ARRAY_RE', '.', 'match', '(', 'line', ')', ':', 'next_table', '=', 'self', '.', 'split_string', '(', 'TABLE_ARRAY_RE', '.', 'match', '(', 'line', ')', '.', 'group', '(', '1', ')', ',', "'.'", ',', 'False', ')', 'if', 'table_name', 'and', 'not', 'contains_list', '(', 'next_table', ',', 'table_name', ')', ':', '# Out of current loop', '# write current data dict to table dict', 'self', '.', '_store_table', '(', 'sub_table', ',', 'temp', ',', 'is_array', ',', 'data', '=', 'data', ')', 'break', 'table', '=', 'cut_list', '(', 'next_table', ',', 'table_name', ')', 'if', 'sub_table', '==', 'table', 'and', 'not', 'is_array', ':', 'raise', 'TomlDecodeError', '(', 'self', '.', 'lineno', ',', "'Duplicate name of '", "'table and array of table: %r'", '%', 'sub_table', ')', 'else', ':', '# Begin a nested loop', '# Write any temp data to table dict', 'self', '.', '_store_table', '(', 'sub_table', ',', 'temp', ',', 'is_array', ',', 'data', '=', 'data', ')', 'sub_table', '=', 'table', 'is_array', '=', 'True', 'self', '.', 'parse', '(', 'temp', ',', 'next_table', ')', 'elif', 'KEY_RE', '.', 'match', '(', 'line', ')', ':', 'm', '=', 'KEY_RE', '.', 'match', '(', 'line', ')', 'keys', '=', 'self', '.', 'split_string', '(', 'm', '.', 'group', '(', '1', ')', ',', "'.'", ')', 'value', '=', 'self', '.', 'converter', '.', 'convert', '(', 'line', '[', 'm', '.', 'end', '(', ')', ':', ']', ')', 'if', 'value', 'is', 'None', ':', 'raise', 'TomlDecodeError', '(', 'self', '.', 'lineno', ',', "'Value is missing'", ')', 'self', '.', '_store_table', '(', 'keys', '[', ':', '-', '1', ']', ',', '{', 'keys', '[', '-', '1', ']', ':', 'value', '}', ',', 'data', '=', 'temp', ')', 'else', ':', 'raise', 'TomlDecodeError', '(', 'self', '.', 'lineno', ',', "'Pattern is not recognized: %r'", '%', 'line', ')', '# Rollback to the last line for next parse', '# This will do nothing if EOF is hit', 'self', '.', 'instream', '.', 'seek', '(', 'self', '.', 'instream', '.', 'tell', '(', ')', '-', 'len', '(', 'line', ')', ')', 'self', '.', 'lineno', '-=', '1']
Parse the lines from index i :param data: optional, store the parsed result to it when specified :param table_name: when inside a table array, it is the table name
['Parse', 'the', 'lines', 'from', 'index', 'i']
train
https://github.com/frostming/atoml/blob/85414ef77777366887a819a05b496d5279296cd2/atoml/decoder.py#L323-L386
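Usage sketch for the TOML table/array decoder above, assuming the package exposes a top-level loads() helper that drives the recursive parse() routine (hypothetical entry point); the snippet exercises the table and array-of-table branches.

import atoml

doc = """
[[fruit]]
name = "apple"

[fruit.physical]
color = "red"

[[fruit]]
name = "banana"
"""
data = atoml.loads(doc)          # assumed wrapper around parse()
print(data["fruit"][1]["name"])  # expected: banana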
1,583
maxpumperla/elephas
elephas/utils/rdd_utils.py
lp_to_simple_rdd
def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None): """Convert a LabeledPoint RDD into an RDD of feature-label pairs :param lp_rdd: LabeledPoint RDD of features and labels :param categorical: boolean, if labels should be one-hot encode when returned :param nb_classes: int, number of total classes :return: Spark RDD with feature-label pairs """ if categorical: if not nb_classes: labels = np.asarray(lp_rdd.map( lambda lp: lp.label).collect(), dtype='int32') nb_classes = np.max(labels) + 1 rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), encode_label(lp.label, nb_classes))) else: rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label)) return rdd
python
def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None): """Convert a LabeledPoint RDD into an RDD of feature-label pairs :param lp_rdd: LabeledPoint RDD of features and labels :param categorical: boolean, if labels should be one-hot encode when returned :param nb_classes: int, number of total classes :return: Spark RDD with feature-label pairs """ if categorical: if not nb_classes: labels = np.asarray(lp_rdd.map( lambda lp: lp.label).collect(), dtype='int32') nb_classes = np.max(labels) + 1 rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), encode_label(lp.label, nb_classes))) else: rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label)) return rdd
['def', 'lp_to_simple_rdd', '(', 'lp_rdd', ',', 'categorical', '=', 'False', ',', 'nb_classes', '=', 'None', ')', ':', 'if', 'categorical', ':', 'if', 'not', 'nb_classes', ':', 'labels', '=', 'np', '.', 'asarray', '(', 'lp_rdd', '.', 'map', '(', 'lambda', 'lp', ':', 'lp', '.', 'label', ')', '.', 'collect', '(', ')', ',', 'dtype', '=', "'int32'", ')', 'nb_classes', '=', 'np', '.', 'max', '(', 'labels', ')', '+', '1', 'rdd', '=', 'lp_rdd', '.', 'map', '(', 'lambda', 'lp', ':', '(', 'from_vector', '(', 'lp', '.', 'features', ')', ',', 'encode_label', '(', 'lp', '.', 'label', ',', 'nb_classes', ')', ')', ')', 'else', ':', 'rdd', '=', 'lp_rdd', '.', 'map', '(', 'lambda', 'lp', ':', '(', 'from_vector', '(', 'lp', '.', 'features', ')', ',', 'lp', '.', 'label', ')', ')', 'return', 'rdd']
Convert a LabeledPoint RDD into an RDD of feature-label pairs :param lp_rdd: LabeledPoint RDD of features and labels :param categorical: boolean, if labels should be one-hot encode when returned :param nb_classes: int, number of total classes :return: Spark RDD with feature-label pairs
['Convert', 'a', 'LabeledPoint', 'RDD', 'into', 'an', 'RDD', 'of', 'feature', '-', 'label', 'pairs']
train
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L104-L121
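A short usage sketch for lp_to_simple_rdd, assuming a local Spark context is available; the import path follows the record above.

from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from elephas.utils.rdd_utils import lp_to_simple_rdd

sc = SparkContext(appName="lp_rdd_demo")
lp_rdd = sc.parallelize([
    LabeledPoint(0.0, [1.0, 2.0]),
    LabeledPoint(1.0, [3.0, 4.0]),
])
# With categorical=True the labels come back one-hot encoded.
pairs = lp_to_simple_rdd(lp_rdd, categorical=True, nb_classes=2)
print(pairs.collect())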
1,584
datastax/python-driver
cassandra/cluster.py
ResultSet.fetch_next_page
def fetch_next_page(self): """ Manually, synchronously fetch the next page. Supplied for manually retrieving pages and inspecting :meth:`~.current_page`. It is not necessary to call this when iterating through results; paging happens implicitly in iteration. """ if self.response_future.has_more_pages: self.response_future.start_fetching_next_page() result = self.response_future.result() self._current_rows = result._current_rows # ResultSet has already _set_current_rows to the appropriate form else: self._current_rows = []
python
def fetch_next_page(self): """ Manually, synchronously fetch the next page. Supplied for manually retrieving pages and inspecting :meth:`~.current_page`. It is not necessary to call this when iterating through results; paging happens implicitly in iteration. """ if self.response_future.has_more_pages: self.response_future.start_fetching_next_page() result = self.response_future.result() self._current_rows = result._current_rows # ResultSet has already _set_current_rows to the appropriate form else: self._current_rows = []
['def', 'fetch_next_page', '(', 'self', ')', ':', 'if', 'self', '.', 'response_future', '.', 'has_more_pages', ':', 'self', '.', 'response_future', '.', 'start_fetching_next_page', '(', ')', 'result', '=', 'self', '.', 'response_future', '.', 'result', '(', ')', 'self', '.', '_current_rows', '=', 'result', '.', '_current_rows', '# ResultSet has already _set_current_rows to the appropriate form', 'else', ':', 'self', '.', '_current_rows', '=', '[', ']']
Manually, synchronously fetch the next page. Supplied for manually retrieving pages and inspecting :meth:`~.current_page`. It is not necessary to call this when iterating through results; paging happens implicitly in iteration.
['Manually', 'synchronously', 'fetch', 'the', 'next', 'page', '.', 'Supplied', 'for', 'manually', 'retrieving', 'pages', 'and', 'inspecting', ':', 'meth', ':', '~', '.', 'current_page', '.', 'It', 'is', 'not', 'necessary', 'to', 'call', 'this', 'when', 'iterating', 'through', 'results', ';', 'paging', 'happens', 'implicitly', 'in', 'iteration', '.']
train
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L4424-L4435
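A hedged sketch of manual paging with fetch_next_page; contact point, keyspace and query are placeholders, and current_page is the property referenced in the docstring above.

from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster(["127.0.0.1"])          # placeholder contact point
session = cluster.connect("my_keyspace")  # placeholder keyspace
rs = session.execute(SimpleStatement("SELECT * FROM events", fetch_size=100))

first_page = list(rs.current_page)
rs.fetch_next_page()                      # synchronously pull the next page
second_page = list(rs.current_page)       # empty once the pages run out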
1,585
ereOn/azmq
azmq/common.py
CompositeClosableAsyncObject.register_child
def register_child(self, child): """ Register a new child that will be closed whenever the current instance closes. :param child: The child instance. """ if self.closing: child.close() else: self._children.add(child) child.on_closed.connect(self.unregister_child)
python
def register_child(self, child): """ Register a new child that will be closed whenever the current instance closes. :param child: The child instance. """ if self.closing: child.close() else: self._children.add(child) child.on_closed.connect(self.unregister_child)
['def', 'register_child', '(', 'self', ',', 'child', ')', ':', 'if', 'self', '.', 'closing', ':', 'child', '.', 'close', '(', ')', 'else', ':', 'self', '.', '_children', '.', 'add', '(', 'child', ')', 'child', '.', 'on_closed', '.', 'connect', '(', 'self', '.', 'unregister_child', ')']
Register a new child that will be closed whenever the current instance closes. :param child: The child instance.
['Register', 'a', 'new', 'child', 'that', 'will', 'be', 'closed', 'whenever', 'the', 'current', 'instance', 'closes', '.']
train
https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/common.py#L202-L213
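A standalone toy mirroring the register_child behaviour above (not azmq's real API): a child registered on an already-closing parent is closed immediately, otherwise it is tracked so the parent can close it later.

class Child:
    def __init__(self):
        self.closed = False
    def close(self):
        self.closed = True

class Parent:
    def __init__(self):
        self.closing = False
        self._children = set()
    def register_child(self, child):
        if self.closing:
            child.close()            # late registration: close straight away
        else:
            self._children.add(child)

p, c = Parent(), Child()
p.closing = True
p.register_child(c)
print(c.closed)                      # True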
1,586
zengbin93/zb
zb/base.py
ZbList.discrete_index
def discrete_index(self, indices): """get elements by discrete indices :param indices: list discrete indices :return: elements """ elements = [] for i in indices: elements.append(self[i]) return elements
python
def discrete_index(self, indices): """get elements by discrete indices :param indices: list discrete indices :return: elements """ elements = [] for i in indices: elements.append(self[i]) return elements
['def', 'discrete_index', '(', 'self', ',', 'indices', ')', ':', 'elements', '=', '[', ']', 'for', 'i', 'in', 'indices', ':', 'elements', '.', 'append', '(', 'self', '[', 'i', ']', ')', 'return', 'elements']
get elements by discrete indices :param indices: list discrete indices :return: elements
['get', 'elements', 'by', 'discrete', 'indices']
train
https://github.com/zengbin93/zb/blob/ccdb384a0b5801b459933220efcb71972c2b89a7/zb/base.py#L28-L38
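A small usage sketch, assuming ZbList behaves like a regular list and can be built from an iterable (import path taken from the record above).

from zb.base import ZbList

letters = ZbList(['a', 'b', 'c', 'd', 'e'])
print(letters.discrete_index([0, 2, 4]))   # ['a', 'c', 'e']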
1,587
chakki-works/seqeval
seqeval/callbacks.py
F1Metrics.get_length
def get_length(self, y): """Get true length of y. Args: y (list): padded list. Returns: lens: true length of y. Examples: >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]] >>> self.get_length(y) [1, 2, 3] """ lens = [self.find_pad_index(row) for row in y] return lens
python
def get_length(self, y): """Get true length of y. Args: y (list): padded list. Returns: lens: true length of y. Examples: >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]] >>> self.get_length(y) [1, 2, 3] """ lens = [self.find_pad_index(row) for row in y] return lens
['def', 'get_length', '(', 'self', ',', 'y', ')', ':', 'lens', '=', '[', 'self', '.', 'find_pad_index', '(', 'row', ')', 'for', 'row', 'in', 'y', ']', 'return', 'lens']
Get true length of y. Args: y (list): padded list. Returns: lens: true length of y. Examples: >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]] >>> self.get_length(y) [1, 2, 3]
['Get', 'true', 'length', 'of', 'y', '.']
train
https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L40-L55
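A standalone illustration of the same idea — recovering true lengths from zero-padded rows — without constructing an F1Metrics callback.

# 0 is assumed to be the padding value, matching the doctest above.
y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
lengths = [row.index(0) if 0 in row else len(row) for row in y]
print(lengths)   # [1, 2, 3]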
1,588
pycontribs/pyrax
pyrax/__init__.py
connect_to_cloudfiles
def connect_to_cloudfiles(region=None, public=None): """Creates a client for working with CloudFiles/Swift.""" if public is None: is_public = not bool(get_setting("use_servicenet")) else: is_public = public ret = _create_client(ep_name="object_store", region=region, public=is_public) if ret: # Add CDN endpoints, if available region = _safe_region(region) ret.cdn_management_url = _get_service_endpoint(None, "object_cdn", region, public=is_public) return ret
python
def connect_to_cloudfiles(region=None, public=None): """Creates a client for working with CloudFiles/Swift.""" if public is None: is_public = not bool(get_setting("use_servicenet")) else: is_public = public ret = _create_client(ep_name="object_store", region=region, public=is_public) if ret: # Add CDN endpoints, if available region = _safe_region(region) ret.cdn_management_url = _get_service_endpoint(None, "object_cdn", region, public=is_public) return ret
['def', 'connect_to_cloudfiles', '(', 'region', '=', 'None', ',', 'public', '=', 'None', ')', ':', 'if', 'public', 'is', 'None', ':', 'is_public', '=', 'not', 'bool', '(', 'get_setting', '(', '"use_servicenet"', ')', ')', 'else', ':', 'is_public', '=', 'public', 'ret', '=', '_create_client', '(', 'ep_name', '=', '"object_store"', ',', 'region', '=', 'region', ',', 'public', '=', 'is_public', ')', 'if', 'ret', ':', '# Add CDN endpoints, if available', 'region', '=', '_safe_region', '(', 'region', ')', 'ret', '.', 'cdn_management_url', '=', '_get_service_endpoint', '(', 'None', ',', '"object_cdn"', ',', 'region', ',', 'public', '=', 'is_public', ')', 'return', 'ret']
Creates a client for working with CloudFiles/Swift.
['Creates', 'a', 'client', 'for', 'working', 'with', 'CloudFiles', '/', 'Swift', '.']
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/__init__.py#L730-L743
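A hedged usage sketch for connect_to_cloudfiles; credentials and region are placeholders.

import pyrax

pyrax.set_setting("identity_type", "rackspace")
pyrax.set_credentials("myuser", "0123456789abcdef")   # placeholder API key
cf = pyrax.connect_to_cloudfiles(region="DFW")        # public endpoint by default
print(cf.cdn_management_url)                          # set when a CDN endpoint exists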
1,589
Midnighter/dependency-info
src/depinfo/info.py
get_sys_info
def get_sys_info(): """Return system information as a dict.""" blob = dict() blob["OS"] = platform.system() blob["OS-release"] = platform.release() blob["Python"] = platform.python_version() return blob
python
def get_sys_info(): """Return system information as a dict.""" blob = dict() blob["OS"] = platform.system() blob["OS-release"] = platform.release() blob["Python"] = platform.python_version() return blob
['def', 'get_sys_info', '(', ')', ':', 'blob', '=', 'dict', '(', ')', 'blob', '[', '"OS"', ']', '=', 'platform', '.', 'system', '(', ')', 'blob', '[', '"OS-release"', ']', '=', 'platform', '.', 'release', '(', ')', 'blob', '[', '"Python"', ']', '=', 'platform', '.', 'python_version', '(', ')', 'return', 'blob']
Return system information as a dict.
['Return', 'system', 'information', 'as', 'a', 'dict', '.']
train
https://github.com/Midnighter/dependency-info/blob/15bcada0a1d6c047cbe10b844d5bd909ea8cc752/src/depinfo/info.py#L38-L44
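Usage is a one-liner; the import path follows the record above.

from depinfo.info import get_sys_info

print(get_sys_info())   # e.g. {'OS': 'Linux', 'OS-release': '5.4.0', 'Python': '3.7.3'}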
1,590
secdev/scapy
scapy/layers/tls/automaton.py
_TLSAutomaton.raise_on_packet
def raise_on_packet(self, pkt_cls, state, get_next_msg=True): """ If the next message to be processed has type 'pkt_cls', raise 'state'. If there is no message waiting to be processed, we try to get one with the default 'get_next_msg' parameters. """ # Maybe we already parsed the expected packet, maybe not. if get_next_msg: self.get_next_msg() if (not self.buffer_in or not isinstance(self.buffer_in[0], pkt_cls)): return self.cur_pkt = self.buffer_in[0] self.buffer_in = self.buffer_in[1:] raise state()
python
def raise_on_packet(self, pkt_cls, state, get_next_msg=True): """ If the next message to be processed has type 'pkt_cls', raise 'state'. If there is no message waiting to be processed, we try to get one with the default 'get_next_msg' parameters. """ # Maybe we already parsed the expected packet, maybe not. if get_next_msg: self.get_next_msg() if (not self.buffer_in or not isinstance(self.buffer_in[0], pkt_cls)): return self.cur_pkt = self.buffer_in[0] self.buffer_in = self.buffer_in[1:] raise state()
['def', 'raise_on_packet', '(', 'self', ',', 'pkt_cls', ',', 'state', ',', 'get_next_msg', '=', 'True', ')', ':', '# Maybe we already parsed the expected packet, maybe not.', 'if', 'get_next_msg', ':', 'self', '.', 'get_next_msg', '(', ')', 'if', '(', 'not', 'self', '.', 'buffer_in', 'or', 'not', 'isinstance', '(', 'self', '.', 'buffer_in', '[', '0', ']', ',', 'pkt_cls', ')', ')', ':', 'return', 'self', '.', 'cur_pkt', '=', 'self', '.', 'buffer_in', '[', '0', ']', 'self', '.', 'buffer_in', '=', 'self', '.', 'buffer_in', '[', '1', ':', ']', 'raise', 'state', '(', ')']
If the next message to be processed has type 'pkt_cls', raise 'state'. If there is no message waiting to be processed, we try to get one with the default 'get_next_msg' parameters.
['If', 'the', 'next', 'message', 'to', 'be', 'processed', 'has', 'type', 'pkt_cls', 'raise', 'state', '.', 'If', 'there', 'is', 'no', 'message', 'waiting', 'to', 'be', 'processed', 'we', 'try', 'to', 'get', 'one', 'with', 'the', 'default', 'get_next_msg', 'parameters', '.']
train
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/automaton.py#L168-L182
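A standalone toy of the pattern raise_on_packet implements — consume the next buffered message only if it has the expected type, then jump to a new automaton state by raising it; it does not use scapy's real classes.

class ServerHello: pass                        # stand-in for a parsed TLS message

class HANDLE_SERVERHELLO(Exception): pass      # stand-in for an automaton state

buffer_in = [ServerHello()]

def raise_on_packet(pkt_cls, state):
    if buffer_in and isinstance(buffer_in[0], pkt_cls):
        buffer_in.pop(0)                       # consume the message
        raise state()                          # transition by raising the state

try:
    raise_on_packet(ServerHello, HANDLE_SERVERHELLO)
except HANDLE_SERVERHELLO:
    print("moved to the next handshake state")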
1,591
hubo1016/aiogrpc
aiogrpc/channel.py
secure_channel
def secure_channel(target, credentials, options=None, *, loop=None, executor=None, standalone_pool_for_streaming=False): """Creates a secure Channel to a server. Args: target: The server address. credentials: A ChannelCredentials instance. options: An optional list of key-value pairs (channel args in gRPC runtime) to configure the channel. Returns: A Channel object. """ return Channel(_grpc.secure_channel(target, credentials, options), loop, executor, standalone_pool_for_streaming)
python
def secure_channel(target, credentials, options=None, *, loop=None, executor=None, standalone_pool_for_streaming=False): """Creates a secure Channel to a server. Args: target: The server address. credentials: A ChannelCredentials instance. options: An optional list of key-value pairs (channel args in gRPC runtime) to configure the channel. Returns: A Channel object. """ return Channel(_grpc.secure_channel(target, credentials, options), loop, executor, standalone_pool_for_streaming)
['def', 'secure_channel', '(', 'target', ',', 'credentials', ',', 'options', '=', 'None', ',', '*', ',', 'loop', '=', 'None', ',', 'executor', '=', 'None', ',', 'standalone_pool_for_streaming', '=', 'False', ')', ':', 'return', 'Channel', '(', '_grpc', '.', 'secure_channel', '(', 'target', ',', 'credentials', ',', 'options', ')', ',', 'loop', ',', 'executor', ',', 'standalone_pool_for_streaming', ')']
Creates a secure Channel to a server. Args: target: The server address. credentials: A ChannelCredentials instance. options: An optional list of key-value pairs (channel args in gRPC runtime) to configure the channel. Returns: A Channel object.
['Creates', 'a', 'secure', 'Channel', 'to', 'a', 'server', '.']
train
https://github.com/hubo1016/aiogrpc/blob/5bc98bfbe9f2e11dd0eab8e93b8aeefbcc2ccd4b/aiogrpc/channel.py#L432-L446
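A hedged sketch of opening a TLS channel; the certificate path and target address are placeholders.

import grpc
from aiogrpc.channel import secure_channel

with open("ca.pem", "rb") as f:                        # placeholder CA bundle
    creds = grpc.ssl_channel_credentials(root_certificates=f.read())

channel = secure_channel("my.service.example.com:443", creds)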
1,592
sahilchinoy/django-irs-filings
irs/management/commands/loadIRS.py
Command.build_mappings
def build_mappings(self): """ Uses CSV files of field names and positions for different filing types to load mappings into memory, for use in parsing different types of rows. """ self.mappings = {} for record_type in ('sa', 'sb', 'F8872'): path = os.path.join( os.path.dirname( os.path.dirname( os.path.dirname(__file__))), 'mappings', '{}.csv'.format(record_type)) mapping = {} with open(path, 'r') as csvfile: reader = csv.DictReader(csvfile) for row in reader: mapping[row['position']] = ( row['model_name'], row['field_type']) self.mappings[record_type] = mapping
python
def build_mappings(self): """ Uses CSV files of field names and positions for different filing types to load mappings into memory, for use in parsing different types of rows. """ self.mappings = {} for record_type in ('sa', 'sb', 'F8872'): path = os.path.join( os.path.dirname( os.path.dirname( os.path.dirname(__file__))), 'mappings', '{}.csv'.format(record_type)) mapping = {} with open(path, 'r') as csvfile: reader = csv.DictReader(csvfile) for row in reader: mapping[row['position']] = ( row['model_name'], row['field_type']) self.mappings[record_type] = mapping
['def', 'build_mappings', '(', 'self', ')', ':', 'self', '.', 'mappings', '=', '{', '}', 'for', 'record_type', 'in', '(', "'sa'", ',', "'sb'", ',', "'F8872'", ')', ':', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'dirname', '(', '__file__', ')', ')', ')', ',', "'mappings'", ',', "'{}.csv'", '.', 'format', '(', 'record_type', ')', ')', 'mapping', '=', '{', '}', 'with', 'open', '(', 'path', ',', "'r'", ')', 'as', 'csvfile', ':', 'reader', '=', 'csv', '.', 'DictReader', '(', 'csvfile', ')', 'for', 'row', 'in', 'reader', ':', 'mapping', '[', 'row', '[', "'position'", ']', ']', '=', '(', 'row', '[', "'model_name'", ']', ',', 'row', '[', "'field_type'", ']', ')', 'self', '.', 'mappings', '[', 'record_type', ']', '=', 'mapping']
Uses CSV files of field names and positions for different filing types to load mappings into memory, for use in parsing different types of rows.
['Uses', 'CSV', 'files', 'of', 'field', 'names', 'and', 'positions', 'for', 'different', 'filing', 'types', 'to', 'load', 'mappings', 'into', 'memory', 'for', 'use', 'in', 'parsing', 'different', 'types', 'of', 'rows', '.']
train
https://github.com/sahilchinoy/django-irs-filings/blob/efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b/irs/management/commands/loadIRS.py#L244-L266
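A standalone sketch of the mapping-file idea: a CSV of field positions (hypothetical contents) turned into {position: (model_name, field_type)}, mirroring the loop above.

import csv
import io

csv_text = """position,model_name,field_type
1,form_type,S
2,ein,S
"""
mapping = {}
for row in csv.DictReader(io.StringIO(csv_text)):
    mapping[row["position"]] = (row["model_name"], row["field_type"])
print(mapping)   # {'1': ('form_type', 'S'), '2': ('ein', 'S')}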
1,593
pyblish/pyblish-qml
pyblish_qml/vendor/mock.py
NonCallableMock.attach_mock
def attach_mock(self, mock, attribute): """ Attach a mock as an attribute of this one, replacing its name and parent. Calls to the attached mock will be recorded in the `method_calls` and `mock_calls` attributes of this one.""" mock._mock_parent = None mock._mock_new_parent = None mock._mock_name = '' mock._mock_new_name = None setattr(self, attribute, mock)
python
def attach_mock(self, mock, attribute): """ Attach a mock as an attribute of this one, replacing its name and parent. Calls to the attached mock will be recorded in the `method_calls` and `mock_calls` attributes of this one.""" mock._mock_parent = None mock._mock_new_parent = None mock._mock_name = '' mock._mock_new_name = None setattr(self, attribute, mock)
['def', 'attach_mock', '(', 'self', ',', 'mock', ',', 'attribute', ')', ':', 'mock', '.', '_mock_parent', '=', 'None', 'mock', '.', '_mock_new_parent', '=', 'None', 'mock', '.', '_mock_name', '=', "''", 'mock', '.', '_mock_new_name', '=', 'None', 'setattr', '(', 'self', ',', 'attribute', ',', 'mock', ')']
Attach a mock as an attribute of this one, replacing its name and parent. Calls to the attached mock will be recorded in the `method_calls` and `mock_calls` attributes of this one.
['Attach', 'a', 'mock', 'as', 'an', 'attribute', 'of', 'this', 'one', 'replacing', 'its', 'name', 'and', 'parent', '.', 'Calls', 'to', 'the', 'attached', 'mock', 'will', 'be', 'recorded', 'in', 'the', 'method_calls', 'and', 'mock_calls', 'attributes', 'of', 'this', 'one', '.']
train
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/vendor/mock.py#L518-L528
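The standard mock library exposes the same method, so attaching a child and checking mock_calls looks like this:

from mock import Mock, call   # unittest.mock in the standard library works too

parent = Mock()
child = Mock(return_value=None)
parent.attach_mock(child, "child")

parent.child(1)
print(parent.mock_calls)      # [call.child(1)]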
1,594
ph4r05/monero-serialize
monero_serialize/xmrserialize.py
Archive._dump_variant
async def _dump_variant(self, writer, elem, elem_type=None, params=None): """ Dumps variant type to the writer. Supports both wrapped and raw variant. :param writer: :param elem: :param elem_type: :param params: :return: """ if isinstance(elem, VariantType) or elem_type.WRAPS_VALUE: await dump_uint(writer, elem.variant_elem_type.VARIANT_CODE, 1) await self.dump_field( writer, getattr(elem, elem.variant_elem), elem.variant_elem_type ) else: fdef = find_variant_fdef(elem_type, elem) await dump_uint(writer, fdef[1].VARIANT_CODE, 1) await self.dump_field(writer, elem, fdef[1])
python
async def _dump_variant(self, writer, elem, elem_type=None, params=None): """ Dumps variant type to the writer. Supports both wrapped and raw variant. :param writer: :param elem: :param elem_type: :param params: :return: """ if isinstance(elem, VariantType) or elem_type.WRAPS_VALUE: await dump_uint(writer, elem.variant_elem_type.VARIANT_CODE, 1) await self.dump_field( writer, getattr(elem, elem.variant_elem), elem.variant_elem_type ) else: fdef = find_variant_fdef(elem_type, elem) await dump_uint(writer, fdef[1].VARIANT_CODE, 1) await self.dump_field(writer, elem, fdef[1])
['async', 'def', '_dump_variant', '(', 'self', ',', 'writer', ',', 'elem', ',', 'elem_type', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'elem', ',', 'VariantType', ')', 'or', 'elem_type', '.', 'WRAPS_VALUE', ':', 'await', 'dump_uint', '(', 'writer', ',', 'elem', '.', 'variant_elem_type', '.', 'VARIANT_CODE', ',', '1', ')', 'await', 'self', '.', 'dump_field', '(', 'writer', ',', 'getattr', '(', 'elem', ',', 'elem', '.', 'variant_elem', ')', ',', 'elem', '.', 'variant_elem_type', ')', 'else', ':', 'fdef', '=', 'find_variant_fdef', '(', 'elem_type', ',', 'elem', ')', 'await', 'dump_uint', '(', 'writer', ',', 'fdef', '[', '1', ']', '.', 'VARIANT_CODE', ',', '1', ')', 'await', 'self', '.', 'dump_field', '(', 'writer', ',', 'elem', ',', 'fdef', '[', '1', ']', ')']
Dumps variant type to the writer. Supports both wrapped and raw variant. :param writer: :param elem: :param elem_type: :param params: :return:
['Dumps', 'variant', 'type', 'to', 'the', 'writer', '.', 'Supports', 'both', 'wrapped', 'and', 'raw', 'variant', '.']
train
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrserialize.py#L686-L706
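A standalone toy of the variant-code idea above — a one-byte code written before the payload tells the reader which concrete type follows; the codes and payload are illustrative only.

VARIANT_CODES = {"txin_gen": 0xFF, "txin_to_key": 0x02}   # illustrative codes

def dump_variant(buf, name, payload):
    buf.append(VARIANT_CODES[name])   # variant code first
    buf.extend(payload)               # then the serialized element

buf = bytearray()
dump_variant(buf, "txin_gen", b"\x01")
print(buf.hex())   # ff01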
1,595
projectshift/shift-boiler
boiler/cli/colors.py
colour
def colour(colour, message, bold=False): """ Color a message """ return style(fg=colour, text=message, bold=bold)
python
def colour(colour, message, bold=False): """ Color a message """ return style(fg=colour, text=message, bold=bold)
['def', 'colour', '(', 'colour', ',', 'message', ',', 'bold', '=', 'False', ')', ':', 'return', 'style', '(', 'fg', '=', 'colour', ',', 'text', '=', 'message', ',', 'bold', '=', 'bold', ')']
Color a message
['Color', 'a', 'message']
train
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/colors.py#L4-L6
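A short usage sketch; colour() just wraps the styling helper, so the result can be passed to click.echo.

import click
from boiler.cli.colors import colour

click.echo(colour('green', 'All checks passed', bold=True))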
1,596
twoolie/NBT
nbt/region.py
RegionFile.iter_chunks_class
def iter_chunks_class(self): """ Yield each readable chunk present in the region. Chunks that can not be read for whatever reason are silently skipped. This function returns a :class:`nbt.chunk.Chunk` instance. """ for m in self.get_metadata(): try: yield self.chunkclass(self.get_chunk(m.x, m.z)) except RegionFileFormatError: pass
python
def iter_chunks_class(self): """ Yield each readable chunk present in the region. Chunks that can not be read for whatever reason are silently skipped. This function returns a :class:`nbt.chunk.Chunk` instance. """ for m in self.get_metadata(): try: yield self.chunkclass(self.get_chunk(m.x, m.z)) except RegionFileFormatError: pass
['def', 'iter_chunks_class', '(', 'self', ')', ':', 'for', 'm', 'in', 'self', '.', 'get_metadata', '(', ')', ':', 'try', ':', 'yield', 'self', '.', 'chunkclass', '(', 'self', '.', 'get_chunk', '(', 'm', '.', 'x', ',', 'm', '.', 'z', ')', ')', 'except', 'RegionFileFormatError', ':', 'pass']
Yield each readable chunk present in the region. Chunks that can not be read for whatever reason are silently skipped. This function returns a :class:`nbt.chunk.Chunk` instance.
['Yield', 'each', 'readable', 'chunk', 'present', 'in', 'the', 'region', '.', 'Chunks', 'that', 'can', 'not', 'be', 'read', 'for', 'whatever', 'reason', 'are', 'silently', 'skipped', '.', 'This', 'function', 'returns', 'a', ':', 'class', ':', 'nbt', '.', 'chunk', '.', 'Chunk', 'instance', '.']
train
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/region.py#L486-L496
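A usage sketch for iter_chunks_class; the region file name is a placeholder.

from nbt.region import RegionFile

region = RegionFile(filename='r.0.0.mca')   # placeholder Minecraft region file
for chunk in region.iter_chunks_class():    # yields nbt.chunk.Chunk instances
    print(chunk)                            # unreadable chunks are skipped silently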
1,597
materialsproject/pymatgen
pymatgen/symmetry/analyzer.py
PointGroupAnalyzer._get_smallest_set_not_on_axis
def _get_smallest_set_not_on_axis(self, axis): """ Returns the smallest list of atoms with the same species and distance from origin AND does not lie on the specified axis. This maximal set limits the possible rotational symmetry operations, since atoms lying on a test axis is irrelevant in testing rotational symmetryOperations. """ def not_on_axis(site): v = np.cross(site.coords, axis) return np.linalg.norm(v) > self.tol valid_sets = [] origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol) for test_set in dist_el_sites.values(): valid_set = list(filter(not_on_axis, test_set)) if len(valid_set) > 0: valid_sets.append(valid_set) return min(valid_sets, key=lambda s: len(s))
python
def _get_smallest_set_not_on_axis(self, axis): """ Returns the smallest list of atoms with the same species and distance from origin AND does not lie on the specified axis. This maximal set limits the possible rotational symmetry operations, since atoms lying on a test axis is irrelevant in testing rotational symmetryOperations. """ def not_on_axis(site): v = np.cross(site.coords, axis) return np.linalg.norm(v) > self.tol valid_sets = [] origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol) for test_set in dist_el_sites.values(): valid_set = list(filter(not_on_axis, test_set)) if len(valid_set) > 0: valid_sets.append(valid_set) return min(valid_sets, key=lambda s: len(s))
['def', '_get_smallest_set_not_on_axis', '(', 'self', ',', 'axis', ')', ':', 'def', 'not_on_axis', '(', 'site', ')', ':', 'v', '=', 'np', '.', 'cross', '(', 'site', '.', 'coords', ',', 'axis', ')', 'return', 'np', '.', 'linalg', '.', 'norm', '(', 'v', ')', '>', 'self', '.', 'tol', 'valid_sets', '=', '[', ']', 'origin_site', ',', 'dist_el_sites', '=', 'cluster_sites', '(', 'self', '.', 'centered_mol', ',', 'self', '.', 'tol', ')', 'for', 'test_set', 'in', 'dist_el_sites', '.', 'values', '(', ')', ':', 'valid_set', '=', 'list', '(', 'filter', '(', 'not_on_axis', ',', 'test_set', ')', ')', 'if', 'len', '(', 'valid_set', ')', '>', '0', ':', 'valid_sets', '.', 'append', '(', 'valid_set', ')', 'return', 'min', '(', 'valid_sets', ',', 'key', '=', 'lambda', 's', ':', 'len', '(', 's', ')', ')']
Returns the smallest list of atoms with the same species and distance from origin AND does not lie on the specified axis. This maximal set limits the possible rotational symmetry operations, since atoms lying on a test axis is irrelevant in testing rotational symmetryOperations.
['Returns', 'the', 'smallest', 'list', 'of', 'atoms', 'with', 'the', 'same', 'species', 'and', 'distance', 'from', 'origin', 'AND', 'does', 'not', 'lie', 'on', 'the', 'specified', 'axis', '.', 'This', 'maximal', 'set', 'limits', 'the', 'possible', 'rotational', 'symmetry', 'operations', 'since', 'atoms', 'lying', 'on', 'a', 'test', 'axis', 'is', 'irrelevant', 'in', 'testing', 'rotational', 'symmetryOperations', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1058-L1078
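_get_smallest_set_not_on_axis is a private helper; the public entry point is PointGroupAnalyzer, e.g. for a water molecule (approximate coordinates):

from pymatgen.core.structure import Molecule
from pymatgen.symmetry.analyzer import PointGroupAnalyzer

mol = Molecule(["O", "H", "H"],
               [[0.000, 0.000, 0.000],
                [0.757, 0.586, 0.000],
                [-0.757, 0.586, 0.000]])
print(PointGroupAnalyzer(mol).sch_symbol)   # C2v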
1,598
armstrong/armstrong.core.arm_sections
armstrong/core/arm_sections/models.py
BaseSection.toggle_item
def toggle_item(self, item, test_func, field_name=None): """ Toggles the section based on test_func. test_func takes an item and returns a boolean. If it returns True, the item will be added to the given section. It will be removed from the section otherwise. Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL. Behavior on other items is undefined. """ if test_func(item): self.add_item(item, field_name) return True else: self.remove_item(item, field_name) return False
python
def toggle_item(self, item, test_func, field_name=None): """ Toggles the section based on test_func. test_func takes an item and returns a boolean. If it returns True, the item will be added to the given section. It will be removed from the section otherwise. Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL. Behavior on other items is undefined. """ if test_func(item): self.add_item(item, field_name) return True else: self.remove_item(item, field_name) return False
['def', 'toggle_item', '(', 'self', ',', 'item', ',', 'test_func', ',', 'field_name', '=', 'None', ')', ':', 'if', 'test_func', '(', 'item', ')', ':', 'self', '.', 'add_item', '(', 'item', ',', 'field_name', ')', 'return', 'True', 'else', ':', 'self', '.', 'remove_item', '(', 'item', ',', 'field_name', ')', 'return', 'False']
Toggles the section based on test_func. test_func takes an item and returns a boolean. If it returns True, the item will be added to the given section. It will be removed from the section otherwise. Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL. Behavior on other items is undefined.
['Toggles', 'the', 'section', 'based', 'on', 'test_func', '.']
train
https://github.com/armstrong/armstrong.core.arm_sections/blob/39c999c93771da909359e53b35afefe4846f77cb/armstrong/core/arm_sections/models.py#L153-L169
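A hedged sketch of toggle_item; the Section lookup and the predicate are illustrative, since the item model is whatever settings.ARMSTRONG_SECTION_ITEM_MODEL points at.

from armstrong.core.arm_sections.models import Section   # assumed concrete model

def file_under_sports(article):
    sports = Section.objects.get(slug="sports")           # placeholder slug
    # Adds the article to the section when the predicate is True, removes it otherwise.
    return sports.toggle_item(article, lambda item: "sports" in item.title.lower())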
1,599
RedHatInsights/insights-core
insights/client/__init__.py
InsightsClient.fetch
def fetch(self, force=False): """ returns (dict): {'core': path to new egg, None if no update, 'gpg_sig': path to new sig, None if no update} """ tmpdir = tempfile.mkdtemp() fetch_results = { 'core': os.path.join(tmpdir, 'insights-core.egg'), 'gpg_sig': os.path.join(tmpdir, 'insights-core.egg.asc') } logger.debug("Beginning core fetch.") # guess the URLs based on what legacy setting is egg_url = self.config.egg_path egg_gpg_url = self.config.egg_gpg_path if egg_url is None: if self.config.legacy_upload: egg_url = '/v1/static/core/insights-core.egg' else: egg_url = '/static/insights-core.egg' if egg_gpg_url is None: if self.config.legacy_upload: egg_gpg_url = '/v1/static/core/insights-core.egg.asc' else: egg_gpg_url = '/static/insights-core.egg.asc' # run fetch for egg updated = self._fetch(egg_url, constants.core_etag_file, fetch_results['core'], force) # if new core was fetched, get new core sig if updated: logger.debug("New core was fetched.") logger.debug("Beginning fetch for core gpg signature.") self._fetch(egg_gpg_url, constants.core_gpg_sig_etag_file, fetch_results['gpg_sig'], force) return fetch_results
python
def fetch(self, force=False): """ returns (dict): {'core': path to new egg, None if no update, 'gpg_sig': path to new sig, None if no update} """ tmpdir = tempfile.mkdtemp() fetch_results = { 'core': os.path.join(tmpdir, 'insights-core.egg'), 'gpg_sig': os.path.join(tmpdir, 'insights-core.egg.asc') } logger.debug("Beginning core fetch.") # guess the URLs based on what legacy setting is egg_url = self.config.egg_path egg_gpg_url = self.config.egg_gpg_path if egg_url is None: if self.config.legacy_upload: egg_url = '/v1/static/core/insights-core.egg' else: egg_url = '/static/insights-core.egg' if egg_gpg_url is None: if self.config.legacy_upload: egg_gpg_url = '/v1/static/core/insights-core.egg.asc' else: egg_gpg_url = '/static/insights-core.egg.asc' # run fetch for egg updated = self._fetch(egg_url, constants.core_etag_file, fetch_results['core'], force) # if new core was fetched, get new core sig if updated: logger.debug("New core was fetched.") logger.debug("Beginning fetch for core gpg signature.") self._fetch(egg_gpg_url, constants.core_gpg_sig_etag_file, fetch_results['gpg_sig'], force) return fetch_results
['def', 'fetch', '(', 'self', ',', 'force', '=', 'False', ')', ':', 'tmpdir', '=', 'tempfile', '.', 'mkdtemp', '(', ')', 'fetch_results', '=', '{', "'core'", ':', 'os', '.', 'path', '.', 'join', '(', 'tmpdir', ',', "'insights-core.egg'", ')', ',', "'gpg_sig'", ':', 'os', '.', 'path', '.', 'join', '(', 'tmpdir', ',', "'insights-core.egg.asc'", ')', '}', 'logger', '.', 'debug', '(', '"Beginning core fetch."', ')', '# guess the URLs based on what legacy setting is', 'egg_url', '=', 'self', '.', 'config', '.', 'egg_path', 'egg_gpg_url', '=', 'self', '.', 'config', '.', 'egg_gpg_path', 'if', 'egg_url', 'is', 'None', ':', 'if', 'self', '.', 'config', '.', 'legacy_upload', ':', 'egg_url', '=', "'/v1/static/core/insights-core.egg'", 'else', ':', 'egg_url', '=', "'/static/insights-core.egg'", 'if', 'egg_gpg_url', 'is', 'None', ':', 'if', 'self', '.', 'config', '.', 'legacy_upload', ':', 'egg_gpg_url', '=', "'/v1/static/core/insights-core.egg.asc'", 'else', ':', 'egg_gpg_url', '=', "'/static/insights-core.egg.asc'", '# run fetch for egg', 'updated', '=', 'self', '.', '_fetch', '(', 'egg_url', ',', 'constants', '.', 'core_etag_file', ',', 'fetch_results', '[', "'core'", ']', ',', 'force', ')', '# if new core was fetched, get new core sig', 'if', 'updated', ':', 'logger', '.', 'debug', '(', '"New core was fetched."', ')', 'logger', '.', 'debug', '(', '"Beginning fetch for core gpg signature."', ')', 'self', '.', '_fetch', '(', 'egg_gpg_url', ',', 'constants', '.', 'core_gpg_sig_etag_file', ',', 'fetch_results', '[', "'gpg_sig'", ']', ',', 'force', ')', 'return', 'fetch_results']
returns (dict): {'core': path to new egg, None if no update, 'gpg_sig': path to new sig, None if no update}
['returns', '(', 'dict', ')', ':', '{', 'core', ':', 'path', 'to', 'new', 'egg', 'None', 'if', 'no', 'update', 'gpg_sig', ':', 'path', 'to', 'new', 'sig', 'None', 'if', 'no', 'update', '}']
train
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L90-L131
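A hedged sketch of fetching a new core egg; constructing the client from a default InsightsConfig is an assumption.

from insights.client.config import InsightsConfig
from insights.client import InsightsClient

client = InsightsClient(InsightsConfig())   # assumed default configuration
paths = client.fetch()
print(paths)   # {'core': '/tmp/.../insights-core.egg', 'gpg_sig': '/tmp/.../insights-core.egg.asc'}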