Dataset schema (per-row fields):

  repo              string, 7–54 chars
  path              string, 4–192 chars
  url               string, 87–284 chars
  code              string, 78–104k chars
  code_tokens       list of strings
  docstring         string, 1–46.9k chars
  docstring_tokens  list of strings
  language          categorical, 1 distinct value
  partition         categorical, 3 distinct values
ff0000/scarlet
scarlet/cms/forms.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/forms.py#L38-L59
def get_filter_kwargs(self):
    """
    Translates the cleaned data into a dictionary that can be used
    to generate the filter, removing blank values.
    """
    if self.is_valid():
        filter_kwargs = {}
        for field in self.get_filter_fields():
            empty_values = EMPTY_VALUES
            if hasattr(self.fields[field], 'empty_values'):
                empty_values = self.fields[field].empty_values
            value = self.cleaned_data.get(field)
            if value not in empty_values:
                if self.search_fields and field in self.search_fields:
                    filter_kwargs["%s__icontains" % field] = value
                else:
                    filter_kwargs[field] = value
        return filter_kwargs
    else:
        return {}
[ "def", "get_filter_kwargs", "(", "self", ")", ":", "if", "self", ".", "is_valid", "(", ")", ":", "filter_kwargs", "=", "{", "}", "for", "field", "in", "self", ".", "get_filter_fields", "(", ")", ":", "empty_values", "=", "EMPTY_VALUES", "if", "hasattr", "(", "self", ".", "fields", "[", "field", "]", ",", "'empty_values'", ")", ":", "empty_values", "=", "self", ".", "fields", "[", "field", "]", ".", "empty_values", "value", "=", "self", ".", "cleaned_data", ".", "get", "(", "field", ")", "if", "not", "value", "in", "empty_values", ":", "if", "self", ".", "search_fields", "and", "field", "in", "self", ".", "search_fields", ":", "filter_kwargs", "[", "\"%s__icontains\"", "%", "field", "]", "=", "value", "else", ":", "filter_kwargs", "[", "field", "]", "=", "value", "return", "filter_kwargs", "else", ":", "return", "{", "}" ]
Translates the cleaned data into a dictionary that can be used to generate the filter, removing blank values.
[ "Translates", "the", "cleaned", "data", "into", "a", "dictionary", "that", "can", "used", "to", "generate", "the", "filter", "removing", "blank", "values", "." ]
python
train
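To make the mapping concrete, here is a minimal standalone sketch of the dictionary this method builds; the form machinery is omitted, and the field names, values, and `empty_values` default are hypothetical (in the real form, `empty_values` comes from each Django field).

import builtins  # no external deps; plain Python stand-in for the form logic

def build_filter_kwargs(cleaned_data, search_fields=(), empty_values=('', None)):
    filter_kwargs = {}
    for field, value in cleaned_data.items():
        if value in empty_values:
            continue  # blank values are dropped, as in get_filter_kwargs
        if field in search_fields:
            filter_kwargs["%s__icontains" % field] = value  # substring search
        else:
            filter_kwargs[field] = value  # exact-match filter
    return filter_kwargs

print(build_filter_kwargs({'title': 'news', 'status': ''}, search_fields=('title',)))
# {'title__icontains': 'news'}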
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L236-L248
def mac_group_mac_group_entry_entry_address(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    mac_group = ET.SubElement(config, "mac-group",
                              xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
    mac_group_id_key = ET.SubElement(mac_group, "mac-group-id")
    mac_group_id_key.text = kwargs.pop('mac_group_id')
    mac_group_entry = ET.SubElement(mac_group, "mac-group-entry")
    entry_address = ET.SubElement(mac_group_entry, "entry-address")
    entry_address.text = kwargs.pop('entry_address')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "mac_group_mac_group_entry_entry_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "mac_group", "=", "ET", ".", "SubElement", "(", "config", ",", "\"mac-group\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-mac-address-table\"", ")", "mac_group_id_key", "=", "ET", ".", "SubElement", "(", "mac_group", ",", "\"mac-group-id\"", ")", "mac_group_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_group_id'", ")", "mac_group_entry", "=", "ET", ".", "SubElement", "(", "mac_group", ",", "\"mac-group-entry\"", ")", "entry_address", "=", "ET", ".", "SubElement", "(", "mac_group_entry", ",", "\"entry-address\"", ")", "entry_address", ".", "text", "=", "kwargs", ".", "pop", "(", "'entry_address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
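Since the method just assembles an ElementTree payload, the same structure can be reproduced with the standard library alone; the mac-group id and MAC address below are made-up values.

import xml.etree.ElementTree as ET

config = ET.Element("config")
mac_group = ET.SubElement(config, "mac-group",
                          xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
ET.SubElement(mac_group, "mac-group-id").text = "1"  # hypothetical group id
mac_group_entry = ET.SubElement(mac_group, "mac-group-entry")
ET.SubElement(mac_group_entry, "entry-address").text = "0011.2233.4455"  # made up

print(ET.tostring(config).decode())  # the NETCONF-style payload, on one line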
google/grr
grr/core/grr_response_core/lib/parsers/osx_file_parser.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/osx_file_parser.py#L196-L217
def Parse(self, statentry, file_object, knowledge_base):
  """Parse the Plist file."""
  plist = biplist.readPlist(file_object)

  if not isinstance(plist, list):
    raise parser.ParseError(
        "InstallHistory plist is a '%s', expecting a list" % type(plist))

  packages = []
  for sw in plist:
    packages.append(
        rdf_client.SoftwarePackage(
            name=sw.get("displayName"),
            version=sw.get("displayVersion"),
            description=",".join(sw.get("packageIdentifiers")),
            # TODO(hanuszczak): make installed_on an RDFDatetime
            installed_on=_DateToEpoch(sw.get("date")),
            install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED))

  if packages:
    yield rdf_client.SoftwarePackages(packages=packages)
[ "def", "Parse", "(", "self", ",", "statentry", ",", "file_object", ",", "knowledge_base", ")", ":", "plist", "=", "biplist", ".", "readPlist", "(", "file_object", ")", "if", "not", "isinstance", "(", "plist", ",", "list", ")", ":", "raise", "parser", ".", "ParseError", "(", "\"InstallHistory plist is a '%s', expecting a list\"", "%", "type", "(", "plist", ")", ")", "packages", "=", "[", "]", "for", "sw", "in", "plist", ":", "packages", ".", "append", "(", "rdf_client", ".", "SoftwarePackage", "(", "name", "=", "sw", ".", "get", "(", "\"displayName\"", ")", ",", "version", "=", "sw", ".", "get", "(", "\"displayVersion\"", ")", ",", "description", "=", "\",\"", ".", "join", "(", "sw", ".", "get", "(", "\"packageIdentifiers\"", ")", ")", ",", "# TODO(hanuszczak): make installed_on an RDFDatetime", "installed_on", "=", "_DateToEpoch", "(", "sw", ".", "get", "(", "\"date\"", ")", ")", ",", "install_state", "=", "rdf_client", ".", "SoftwarePackage", ".", "InstallState", ".", "INSTALLED", ")", ")", "if", "packages", ":", "yield", "rdf_client", ".", "SoftwarePackages", "(", "packages", "=", "packages", ")" ]
Parse the Plist file.
[ "Parse", "the", "Plist", "file", "." ]
python
train
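The same list-of-dicts structure can be exercised with the standard library's plistlib (the original uses biplist for binary plists); the package entry below is made up.

import datetime
import io
import plistlib

raw = plistlib.dumps([{
    "displayName": "macOS Update",               # hypothetical entry
    "displayVersion": "10.14.1",
    "packageIdentifiers": ["com.apple.update"],
    "date": datetime.datetime(2018, 11, 1),
}])

for sw in plistlib.load(io.BytesIO(raw)):
    print(sw.get("displayName"), sw.get("displayVersion"),
          ",".join(sw.get("packageIdentifiers")))
# macOS Update 10.14.1 com.apple.update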
saltstack/salt
salt/modules/ebuildpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ebuildpkg.py#L392-L403
def porttree_matches(name):
    '''
    Returns a list containing the matches for a given package name from the
    portage tree. Note that the specific version of the package will not be
    provided for packages that have several versions in the portage tree, but
    rather the name of the package (e.g. "dev-python/paramiko").
    '''
    matches = []
    for category in _porttree().dbapi.categories:
        if _porttree().dbapi.cp_list(category + "/" + name):
            matches.append(category + "/" + name)
    return matches
[ "def", "porttree_matches", "(", "name", ")", ":", "matches", "=", "[", "]", "for", "category", "in", "_porttree", "(", ")", ".", "dbapi", ".", "categories", ":", "if", "_porttree", "(", ")", ".", "dbapi", ".", "cp_list", "(", "category", "+", "\"/\"", "+", "name", ")", ":", "matches", ".", "append", "(", "category", "+", "\"/\"", "+", "name", ")", "return", "matches" ]
Returns a list containing the matches for a given package name from the portage tree. Note that the specific version of the package will not be provided for packages that have several versions in the portage tree, but rather the name of the package (e.g. "dev-python/paramiko").
[ "Returns", "a", "list", "containing", "the", "matches", "for", "a", "given", "package", "name", "from", "the", "portage", "tree", ".", "Note", "that", "the", "specific", "version", "of", "the", "package", "will", "not", "be", "provided", "for", "packages", "that", "have", "several", "versions", "in", "the", "portage", "tree", "but", "rather", "the", "name", "of", "the", "package", "(", "i", ".", "e", ".", "dev", "-", "python", "/", "paramiko", ")", "." ]
python
train
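A standalone sketch of the category scan, with a stand-in for the portage dbapi since portage itself is Gentoo-specific; the categories and the single hit below are hypothetical.

class FakeDbapi:
    """Hypothetical stand-in for _porttree().dbapi."""
    categories = ("dev-python", "app-misc", "sys-apps")

    def cp_list(self, cp):
        # Pretend only dev-python/paramiko has ebuilds in the tree.
        return ["dev-python/paramiko-2.4.2"] if cp == "dev-python/paramiko" else []

dbapi = FakeDbapi()
name = "paramiko"
matches = [c + "/" + name for c in dbapi.categories if dbapi.cp_list(c + "/" + name)]
print(matches)  # ['dev-python/paramiko']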
spyder-ide/spyder
spyder/widgets/fileswitcher.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/fileswitcher.py#L648-L767
def setup_file_list(self, filter_text, current_path):
    """Setup list widget content for file list display."""
    short_paths = shorten_paths(self.paths, self.save_status)
    paths = self.paths
    icons = self.icons
    results = []
    trying_for_line_number = ':' in filter_text

    # Get optional line number
    if trying_for_line_number:
        filter_text, line_number = filter_text.split(':')
        if line_number == '':
            line_number = None
        # Get all the available filenames
        scores = get_search_scores('', self.filenames,
                                   template="<b>{0}</b>")
    else:
        line_number = None
        # Get all available filenames and get the scores for
        # "fuzzy" matching
        scores = get_search_scores(filter_text, self.filenames,
                                   template="<b>{0}</b>")

    # Get max width to determine if shortpaths should be used
    max_width = self.get_item_size(paths)[0]
    self.fix_size(paths)

    # Build the text that will appear on the list widget
    rich_font = CONF.get('appearance', 'rich_font/size', 10)
    if sys.platform == 'darwin':
        path_text_font_size = rich_font
        filename_text_font_size = path_text_font_size + 2
    elif os.name == 'nt':
        path_text_font_size = rich_font
        filename_text_font_size = path_text_font_size + 1
    elif is_ubuntu():
        path_text_font_size = rich_font - 2
        filename_text_font_size = path_text_font_size + 1
    else:
        path_text_font_size = rich_font
        filename_text_font_size = path_text_font_size + 1

    for index, score in enumerate(scores):
        text, rich_text, score_value = score
        if score_value != -1:
            text_item = ("<span style='color:{0:}; font-size:{1:}pt'>{2:}"
                         "</span>").format(ima.MAIN_FG_COLOR,
                                           filename_text_font_size,
                                           rich_text.replace('&', ''))
            if trying_for_line_number:
                text_item += " [{0:} {1:}]".format(self.line_count[index],
                                                   _("lines"))
            if max_width > self.list.width():
                text_item += (u" &nbsp; <span style='color:{0:};"
                              "font-size:{1:}pt'>{2:}"
                              "</span>").format(self.PATH_FG_COLOR,
                                                path_text_font_size,
                                                short_paths[index])
            else:
                text_item += (u" &nbsp; <span style='color:{0:};"
                              "font-size:{1:}pt'>{2:}"
                              "</span>").format(self.PATH_FG_COLOR,
                                                path_text_font_size,
                                                paths[index])
            if (trying_for_line_number and self.line_count[index] != 0
                    or not trying_for_line_number):
                results.append((score_value, index, text_item))

    # Sort the obtained scores and populate the list widget
    self.filtered_path = []
    plugin = None
    for result in sorted(results):
        index = result[1]
        path = paths[index]
        if sys.platform == 'darwin':
            scale_factor = 0.9
        elif os.name == 'nt':
            scale_factor = 0.8
        elif is_ubuntu():
            scale_factor = 0.6
        else:
            scale_factor = 0.9
        icon = ima.get_icon_by_extension(path, scale_factor)
        text = ''
        try:
            title = self.widgets[index][1].get_plugin_title().split(' - ')
            if plugin != title[0]:
                plugin = title[0]
                text += ("<br><big style='color:{0:}'>"
                         "<b>{1:}</b></big><br>").format(ima.MAIN_FG_COLOR,
                                                         plugin)
                item = QListWidgetItem(text)
                item.setToolTip(path)
                item.setSizeHint(QSize(0, 25))
                item.setFlags(Qt.ItemIsEditable)
                self.list.addItem(item)
                self.filtered_path.append(path)
        except:
            # The widget using the fileswitcher is not a plugin
            pass
        text = ''
        text += result[-1]
        item = QListWidgetItem(icon, text)
        item.setToolTip(path)
        item.setSizeHint(QSize(0, 25))
        self.list.addItem(item)
        self.filtered_path.append(path)

    # To adjust the delegate layout for KDE themes
    self.list.files_list = True

    # Move selected item in list accordingly and update list size
    if current_path in self.filtered_path:
        self.set_current_row(self.filtered_path.index(current_path))
    elif self.filtered_path:
        self.set_current_row(0)

    # If a line number is searched look for it
    self.line_number = line_number
    self.goto_line(line_number)
[ "def", "setup_file_list", "(", "self", ",", "filter_text", ",", "current_path", ")", ":", "short_paths", "=", "shorten_paths", "(", "self", ".", "paths", ",", "self", ".", "save_status", ")", "paths", "=", "self", ".", "paths", "icons", "=", "self", ".", "icons", "results", "=", "[", "]", "trying_for_line_number", "=", "':'", "in", "filter_text", "# Get optional line number", "if", "trying_for_line_number", ":", "filter_text", ",", "line_number", "=", "filter_text", ".", "split", "(", "':'", ")", "if", "line_number", "==", "''", ":", "line_number", "=", "None", "# Get all the available filenames", "scores", "=", "get_search_scores", "(", "''", ",", "self", ".", "filenames", ",", "template", "=", "\"<b>{0}</b>\"", ")", "else", ":", "line_number", "=", "None", "# Get all available filenames and get the scores for", "# \"fuzzy\" matching", "scores", "=", "get_search_scores", "(", "filter_text", ",", "self", ".", "filenames", ",", "template", "=", "\"<b>{0}</b>\"", ")", "# Get max width to determine if shortpaths should be used", "max_width", "=", "self", ".", "get_item_size", "(", "paths", ")", "[", "0", "]", "self", ".", "fix_size", "(", "paths", ")", "# Build the text that will appear on the list widget", "rich_font", "=", "CONF", ".", "get", "(", "'appearance'", ",", "'rich_font/size'", ",", "10", ")", "if", "sys", ".", "platform", "==", "'darwin'", ":", "path_text_font_size", "=", "rich_font", "filename_text_font_size", "=", "path_text_font_size", "+", "2", "elif", "os", ".", "name", "==", "'nt'", ":", "path_text_font_size", "=", "rich_font", "filename_text_font_size", "=", "path_text_font_size", "+", "1", "elif", "is_ubuntu", "(", ")", ":", "path_text_font_size", "=", "rich_font", "-", "2", "filename_text_font_size", "=", "path_text_font_size", "+", "1", "else", ":", "path_text_font_size", "=", "rich_font", "filename_text_font_size", "=", "path_text_font_size", "+", "1", "for", "index", ",", "score", "in", "enumerate", "(", "scores", ")", ":", "text", ",", "rich_text", ",", "score_value", "=", "score", "if", "score_value", "!=", "-", "1", ":", "text_item", "=", "(", "\"<span style='color:{0:}; font-size:{1:}pt'>{2:}\"", "\"</span>\"", ")", ".", "format", "(", "ima", ".", "MAIN_FG_COLOR", ",", "filename_text_font_size", ",", "rich_text", ".", "replace", "(", "'&'", ",", "''", ")", ")", "if", "trying_for_line_number", ":", "text_item", "+=", "\" [{0:} {1:}]\"", ".", "format", "(", "self", ".", "line_count", "[", "index", "]", ",", "_", "(", "\"lines\"", ")", ")", "if", "max_width", ">", "self", ".", "list", ".", "width", "(", ")", ":", "text_item", "+=", "(", "u\" &nbsp; <span style='color:{0:};\"", "\"font-size:{1:}pt'>{2:}\"", "\"</span>\"", ")", ".", "format", "(", "self", ".", "PATH_FG_COLOR", ",", "path_text_font_size", ",", "short_paths", "[", "index", "]", ")", "else", ":", "text_item", "+=", "(", "u\" &nbsp; <span style='color:{0:};\"", "\"font-size:{1:}pt'>{2:}\"", "\"</span>\"", ")", ".", "format", "(", "self", ".", "PATH_FG_COLOR", ",", "path_text_font_size", ",", "paths", "[", "index", "]", ")", "if", "(", "trying_for_line_number", "and", "self", ".", "line_count", "[", "index", "]", "!=", "0", "or", "not", "trying_for_line_number", ")", ":", "results", ".", "append", "(", "(", "score_value", ",", "index", ",", "text_item", ")", ")", "# Sort the obtained scores and populate the list widget", "self", ".", "filtered_path", "=", "[", "]", "plugin", "=", "None", "for", "result", "in", "sorted", "(", "results", ")", ":", "index", "=", "result", "[", "1", "]", "path", "=", "paths", 
"[", "index", "]", "if", "sys", ".", "platform", "==", "'darwin'", ":", "scale_factor", "=", "0.9", "elif", "os", ".", "name", "==", "'nt'", ":", "scale_factor", "=", "0.8", "elif", "is_ubuntu", "(", ")", ":", "scale_factor", "=", "0.6", "else", ":", "scale_factor", "=", "0.9", "icon", "=", "ima", ".", "get_icon_by_extension", "(", "path", ",", "scale_factor", ")", "text", "=", "''", "try", ":", "title", "=", "self", ".", "widgets", "[", "index", "]", "[", "1", "]", ".", "get_plugin_title", "(", ")", ".", "split", "(", "' - '", ")", "if", "plugin", "!=", "title", "[", "0", "]", ":", "plugin", "=", "title", "[", "0", "]", "text", "+=", "(", "\"<br><big style='color:{0:}'>\"", "\"<b>{1:}</b></big><br>\"", ")", ".", "format", "(", "ima", ".", "MAIN_FG_COLOR", ",", "plugin", ")", "item", "=", "QListWidgetItem", "(", "text", ")", "item", ".", "setToolTip", "(", "path", ")", "item", ".", "setSizeHint", "(", "QSize", "(", "0", ",", "25", ")", ")", "item", ".", "setFlags", "(", "Qt", ".", "ItemIsEditable", ")", "self", ".", "list", ".", "addItem", "(", "item", ")", "self", ".", "filtered_path", ".", "append", "(", "path", ")", "except", ":", "# The widget using the fileswitcher is not a plugin", "pass", "text", "=", "''", "text", "+=", "result", "[", "-", "1", "]", "item", "=", "QListWidgetItem", "(", "icon", ",", "text", ")", "item", ".", "setToolTip", "(", "path", ")", "item", ".", "setSizeHint", "(", "QSize", "(", "0", ",", "25", ")", ")", "self", ".", "list", ".", "addItem", "(", "item", ")", "self", ".", "filtered_path", ".", "append", "(", "path", ")", "# To adjust the delegate layout for KDE themes", "self", ".", "list", ".", "files_list", "=", "True", "# Move selected item in list accordingly and update list size", "if", "current_path", "in", "self", ".", "filtered_path", ":", "self", ".", "set_current_row", "(", "self", ".", "filtered_path", ".", "index", "(", "current_path", ")", ")", "elif", "self", ".", "filtered_path", ":", "self", ".", "set_current_row", "(", "0", ")", "# If a line number is searched look for it", "self", ".", "line_number", "=", "line_number", "self", ".", "goto_line", "(", "line_number", ")" ]
Setup list widget content for file list display.
[ "Setup", "list", "widget", "content", "for", "file", "list", "display", "." ]
python
train
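The part of this method that is easy to show in isolation is the "name:line" parsing at the top; a minimal sketch with hypothetical inputs:

def parse_filter(filter_text):
    # Mirrors the split performed by setup_file_list: an optional ':'
    # separates the name filter from a line number.
    if ':' in filter_text:
        filter_text, line_number = filter_text.split(':')
        return filter_text, (line_number or None)
    return filter_text, None

print(parse_filter('utils.py:42'))  # ('utils.py', '42')
print(parse_filter('utils.py:'))    # ('utils.py', None)
print(parse_filter('utils.py'))     # ('utils.py', None)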
allenai/allennlp
allennlp/common/params.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L35-L72
def infer_and_cast(value: Any):
    """
    In some cases we'll be feeding params dicts to functions we don't own;
    for example, PyTorch optimizers. In that case we can't use ``pop_int``
    or similar to force casts (which means you can't specify ``int`` parameters
    using environment variables). This function takes something that looks JSON-like
    and recursively casts things that look like (bool, int, float) to (bool, int, float).
    """
    # pylint: disable=too-many-return-statements
    if isinstance(value, (int, float, bool)):
        # Already one of our desired types, so leave as is.
        return value
    elif isinstance(value, list):
        # Recursively call on each list element.
        return [infer_and_cast(item) for item in value]
    elif isinstance(value, dict):
        # Recursively call on each dict value.
        return {key: infer_and_cast(item) for key, item in value.items()}
    elif isinstance(value, str):
        # If it looks like a bool, make it a bool.
        if value.lower() == "true":
            return True
        elif value.lower() == "false":
            return False
        else:
            # See if it could be an int.
            try:
                return int(value)
            except ValueError:
                pass
            # See if it could be a float.
            try:
                return float(value)
            except ValueError:
                # Just return it as a string.
                return value
    else:
        raise ValueError(f"cannot infer type of {value}")
[ "def", "infer_and_cast", "(", "value", ":", "Any", ")", ":", "# pylint: disable=too-many-return-statements", "if", "isinstance", "(", "value", ",", "(", "int", ",", "float", ",", "bool", ")", ")", ":", "# Already one of our desired types, so leave as is.", "return", "value", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "# Recursively call on each list element.", "return", "[", "infer_and_cast", "(", "item", ")", "for", "item", "in", "value", "]", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "# Recursively call on each dict value.", "return", "{", "key", ":", "infer_and_cast", "(", "item", ")", "for", "key", ",", "item", "in", "value", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "# If it looks like a bool, make it a bool.", "if", "value", ".", "lower", "(", ")", "==", "\"true\"", ":", "return", "True", "elif", "value", ".", "lower", "(", ")", "==", "\"false\"", ":", "return", "False", "else", ":", "# See if it could be an int.", "try", ":", "return", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "# See if it could be a float.", "try", ":", "return", "float", "(", "value", ")", "except", "ValueError", ":", "# Just return it as a string.", "return", "value", "else", ":", "raise", "ValueError", "(", "f\"cannot infer type of {value}\"", ")" ]
In some cases we'll be feeding params dicts to functions we don't own; for example, PyTorch optimizers. In that case we can't use ``pop_int`` or similar to force casts (which means you can't specify ``int`` parameters using environment variables). This function takes something that looks JSON-like and recursively casts things that look like (bool, int, float) to (bool, int, float).
[ "In", "some", "cases", "we", "ll", "be", "feeding", "params", "dicts", "to", "functions", "we", "don", "t", "own", ";", "for", "example", "PyTorch", "optimizers", ".", "In", "that", "case", "we", "can", "t", "use", "pop_int", "or", "similar", "to", "force", "casts", "(", "which", "means", "you", "can", "t", "specify", "int", "parameters", "using", "environment", "variables", ")", ".", "This", "function", "takes", "something", "that", "looks", "JSON", "-", "like", "and", "recursively", "casts", "things", "that", "look", "like", "(", "bool", "int", "float", ")", "to", "(", "bool", "int", "float", ")", "." ]
python
train
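Usage is direct, since the function is module-level in allennlp/common/params.py (the import path may vary across allennlp versions):

from allennlp.common.params import infer_and_cast

params = {"lr": "0.001", "epochs": "30", "shuffle": "True", "name": "run-1"}
print(infer_and_cast(params))
# {'lr': 0.001, 'epochs': 30, 'shuffle': True, 'name': 'run-1'}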
aganezov/bg
bg/breakpoint_graph.py
https://github.com/aganezov/bg/blob/1ec758193441e49e7b34e0da09571480f4c24455/bg/breakpoint_graph.py#L733-L842
def apply_kbreak(self, kbreak, merge=True):
    """ Checks validity of the supplied k-break and then applies it to the current :class:`BreakpointGraph`

    Only :class:`bg.kbreak.KBreak` (or its heirs) instances are allowed as the ``kbreak`` argument.
    The KBreak must correspond to a valid kbreak and, since some changes to its internals might have been
    made since its creation, a validity check in terms of starting/resulting edges is performed.
    All vertices in the supplied KBreak (except for paired infinity vertices) must be present in the current
    :class:`BreakpointGraph`.
    For all supplied pairs of vertices (except for paired infinity vertices), there must be edges between such
    pairs of vertices, at least one of which must contain a multicolor matching the multicolor of the
    supplied kbreak.

    Edges of the multicolor specified in the kbreak are deleted between supplied pairs of vertices in
    kbreak.start_edges (except for paired infinity vertices).
    New edges of the multicolor specified in the kbreak are added between all pairs of vertices in
    kbreak.result_edges (except for paired infinity vertices).
    If after the kbreak application there is an infinity vertex that now has no edges incident to it, it is
    deleted from the current :class:`BreakpointGraph`.

    :param kbreak: a k-break to be applied to the current :class:`BreakpointGraph`
    :type kbreak: `bg.kbreak.KBreak`
    :param merge: a flag indicating how edges created by the k-break are added to the current
        :class:`BreakpointGraph`
    :type merge: ``Boolean``
    :return: nothing, performs inplace changes
    :rtype: ``None``
    :raises: ``ValueError``, ``TypeError``
    """
    ############################################################################################################
    #
    # k-break must be valid to be applied
    #
    ############################################################################################################
    vertices = {}
    edge_data = {}
    if not isinstance(kbreak, KBreak):
        raise TypeError("Only KBreak and derivatives are allowed as kbreak argument")
    if not KBreak.valid_kbreak_matchings(kbreak.start_edges, kbreak.result_edges):
        raise ValueError("Supplied KBreak is not valid from the perspective of starting/resulting "
                         "sets of vertices")
    for vertex1, vertex2 in kbreak.start_edges:
        if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
            ####################################################################################################
            #
            # when we encounter a fully infinity edge (both vertices are infinity vertices)
            # we shall not check if they are present in the current graph,
            # because that portion of a kbreak is artificial
            #
            ####################################################################################################
            continue
        if vertex1 not in self.bg or vertex2 not in self.bg:
            raise ValueError("Supplied KBreak targets vertices (`{v1}` and `{v2}`) at least one of which "
                             "does not exist in current BreakpointGraph"
                             "".format(v1=vertex1.name, v2=vertex2.name))
    for vertex1, vertex2 in kbreak.start_edges:
        if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
            continue
        for bgedge in self.__edges_between_two_vertices(vertex1=vertex1, vertex2=vertex2):
            ####################################################################################################
            #
            # at least one edge between the supplied pair of vertices must contain a multicolor
            # that is specified for the kbreak
            #
            ####################################################################################################
            if kbreak.multicolor <= bgedge.multicolor:
                break
        else:
            raise ValueError("Some edge targeted by kbreak with specified multicolor does not exist")
    for vertex1, vertex2 in kbreak.start_edges:
        if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
            continue
        v1 = self.__get_vertex_by_name(vertex_name=vertex1.name)
        vertices[v1] = v1
        v2 = self.__get_vertex_by_name(vertex_name=vertex2.name)
        vertices[v2] = v2
        bgedge = BGEdge(vertex1=v1, vertex2=v2, multicolor=kbreak.multicolor)
        candidate_data, candidate_id, candidate_score = self.__determine_most_suitable_edge_for_deletion(
            bgedge=bgedge)
        data = candidate_data["attr_dict"]["data"]
        edge_data[v1] = data
        edge_data[v2] = data
        self.__delete_bgedge(bgedge=bgedge, keep_vertices=True)
    for vertex_set in kbreak.start_edges:
        for vertex in vertex_set:
            if vertex.is_infinity_vertex and vertex in self.bg:
                ################################################################################################
                #
                # after the first portion of a kbreak is performed one must make sure we don't leave any
                # infinity vertices that have edges going to them, as an infinity vertex is a special
                # artificial vertex and it has meaning only if there are edges going to / from it
                #
                ################################################################################################
                if len(list(self.get_edges_by_vertex(vertex=vertex))) == 0:
                    self.bg.remove_node(vertex)
    for vertex1, vertex2 in kbreak.result_edges:
        if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
            ####################################################################################################
            #
            # if we encounter a pair of infinity vertices in the result edges set, we shall not add them,
            # as at least a part of the kbreak corresponded to a fusion
            # and those infinity edges on their own won't have any meaning
            #
            ####################################################################################################
            continue
        origin = kbreak.data.get("origin", None)
        v1 = vertices.get(vertex1, vertex1)
        v2 = vertices.get(vertex2, vertex2)
        bg_edge = BGEdge(vertex1=v1, vertex2=v2, multicolor=kbreak.multicolor)
        if "origin" in bg_edge.data:
            bg_edge.data["origin"] = origin
        if kbreak.is_a_fusion:
            edge1_data = edge_data[v1]
            edge2_data = edge_data[v2]
            merged_edge_fragment_data = merge_fragment_edge_data(edge1_data["fragment"],
                                                                 edge2_data["fragment"])
            result_edge_data = {}
            recursive_dict_update(result_edge_data, edge1_data)
            recursive_dict_update(result_edge_data, edge2_data)
            recursive_dict_update(result_edge_data, {"fragment": merged_edge_fragment_data})
            recursive_dict_update(bg_edge.data, result_edge_data)
        self.__add_bgedge(bg_edge, merge=merge)
[ "def", "apply_kbreak", "(", "self", ",", "kbreak", ",", "merge", "=", "True", ")", ":", "############################################################################################################", "#", "# k-break must ba valid to be applied", "#", "############################################################################################################", "vertices", "=", "{", "}", "edge_data", "=", "{", "}", "if", "not", "isinstance", "(", "kbreak", ",", "KBreak", ")", ":", "raise", "TypeError", "(", "\"Only KBreak and derivatives are allowed as kbreak argument\"", ")", "if", "not", "KBreak", ".", "valid_kbreak_matchings", "(", "kbreak", ".", "start_edges", ",", "kbreak", ".", "result_edges", ")", ":", "raise", "ValueError", "(", "\"Supplied KBreak is not valid form perspective of starting/resulting sets of vertices\"", ")", "for", "vertex1", ",", "vertex2", "in", "kbreak", ".", "start_edges", ":", "if", "vertex1", ".", "is_infinity_vertex", "and", "vertex2", ".", "is_infinity_vertex", ":", "############################################################################################################", "#", "# when we encounter a fully infinity edge (both vertices are infinity vertices)", "# we shall not check if they are present in the current graph, because hat portion of a kbreak is artificial", "#", "############################################################################################################", "continue", "if", "vertex1", "not", "in", "self", ".", "bg", "or", "vertex2", "not", "in", "self", ".", "bg", ":", "raise", "ValueError", "(", "\"Supplied KBreak targets vertices (`{v1}` and `{v2}`) at least one of which \"", "\"does not exist in current BreakpointGraph\"", "\"\"", ".", "format", "(", "v1", "=", "vertex1", ".", "name", ",", "v2", "=", "vertex2", ".", "name", ")", ")", "for", "vertex1", ",", "vertex2", "in", "kbreak", ".", "start_edges", ":", "if", "vertex1", ".", "is_infinity_vertex", "and", "vertex2", ".", "is_infinity_vertex", ":", "continue", "for", "bgedge", "in", "self", ".", "__edges_between_two_vertices", "(", "vertex1", "=", "vertex1", ",", "vertex2", "=", "vertex2", ")", ":", "############################################################################################################", "#", "# at least one edge between supplied pair of vertices must contain a multicolor that is specified for the kbreak", "#", "############################################################################################################", "if", "kbreak", ".", "multicolor", "<=", "bgedge", ".", "multicolor", ":", "break", "else", ":", "raise", "ValueError", "(", "\"Some targeted by kbreak edge with specified multicolor does not exists\"", ")", "for", "vertex1", ",", "vertex2", "in", "kbreak", ".", "start_edges", ":", "if", "vertex1", ".", "is_infinity_vertex", "and", "vertex2", ".", "is_infinity_vertex", ":", "continue", "v1", "=", "self", ".", "__get_vertex_by_name", "(", "vertex_name", "=", "vertex1", ".", "name", ")", "vertices", "[", "v1", "]", "=", "v1", "v2", "=", "self", ".", "__get_vertex_by_name", "(", "vertex_name", "=", "vertex2", ".", "name", ")", "vertices", "[", "v2", "]", "=", "v2", "bgedge", "=", "BGEdge", "(", "vertex1", "=", "v1", ",", "vertex2", "=", "v2", ",", "multicolor", "=", "kbreak", ".", "multicolor", ")", "candidate_data", ",", "candidate_id", ",", "candidate_score", "=", "self", ".", "__determine_most_suitable_edge_for_deletion", "(", "bgedge", "=", "bgedge", ")", "data", "=", "candidate_data", "[", "\"attr_dict\"", "]", "[", "\"data\"", "]", 
"edge_data", "[", "v1", "]", "=", "data", "edge_data", "[", "v2", "]", "=", "data", "self", ".", "__delete_bgedge", "(", "bgedge", "=", "bgedge", ",", "keep_vertices", "=", "True", ")", "for", "vertex_set", "in", "kbreak", ".", "start_edges", ":", "for", "vertex", "in", "vertex_set", ":", "if", "vertex", ".", "is_infinity_vertex", "and", "vertex", "in", "self", ".", "bg", ":", "############################################################################################################", "#", "# after the first portion of a kbreak is performed one must make sure we don't leave any infinity vertices", "# that have edges going to them, as infinity vertex is a special artificial vertex", "# and it has meaning only if there are edges going to / from it", "#", "############################################################################################################", "if", "len", "(", "list", "(", "self", ".", "get_edges_by_vertex", "(", "vertex", "=", "vertex", ")", ")", ")", "==", "0", ":", "self", ".", "bg", ".", "remove_node", "(", "vertex", ")", "for", "vertex1", ",", "vertex2", "in", "kbreak", ".", "result_edges", ":", "if", "vertex1", ".", "is_infinity_vertex", "and", "vertex2", ".", "is_infinity_vertex", ":", "############################################################################################################", "#", "# if we encounter a pair of infinity vertices in result edges set, we shall not add them", "# as at least a part of kbreak corresponded to fusion", "# and those infinity edges on their own won't have any meaning", "#", "############################################################################################################", "continue", "origin", "=", "kbreak", ".", "data", ".", "get", "(", "\"origin\"", ",", "None", ")", "v1", "=", "vertices", ".", "get", "(", "vertex1", ",", "vertex1", ")", "v2", "=", "vertices", ".", "get", "(", "vertex2", ",", "vertex2", ")", "bg_edge", "=", "BGEdge", "(", "vertex1", "=", "v1", ",", "vertex2", "=", "v2", ",", "multicolor", "=", "kbreak", ".", "multicolor", ")", "if", "\"origin\"", "in", "bg_edge", ".", "data", ":", "bg_edge", ".", "data", "[", "\"origin\"", "]", "=", "origin", "if", "kbreak", ".", "is_a_fusion", ":", "edge1_data", "=", "edge_data", "[", "v1", "]", "edge2_data", "=", "edge_data", "[", "v2", "]", "merged_edge_fragment_data", "=", "merge_fragment_edge_data", "(", "edge1_data", "[", "\"fragment\"", "]", ",", "edge2_data", "[", "\"fragment\"", "]", ")", "result_edge_data", "=", "{", "}", "recursive_dict_update", "(", "result_edge_data", ",", "edge1_data", ")", "recursive_dict_update", "(", "result_edge_data", ",", "edge2_data", ")", "recursive_dict_update", "(", "result_edge_data", ",", "{", "\"fragment\"", ":", "merged_edge_fragment_data", "}", ")", "recursive_dict_update", "(", "bg_edge", ".", "data", ",", "result_edge_data", ")", "self", ".", "__add_bgedge", "(", "bg_edge", ",", "merge", "=", "merge", ")" ]
Checks validity of the supplied k-break and then applies it to the current :class:`BreakpointGraph`. Only :class:`bg.kbreak.KBreak` (or its heirs) instances are allowed as the ``kbreak`` argument. The KBreak must correspond to a valid kbreak and, since some changes to its internals might have been made since its creation, a validity check in terms of starting/resulting edges is performed. All vertices in the supplied KBreak (except for paired infinity vertices) must be present in the current :class:`BreakpointGraph`. For all supplied pairs of vertices (except for paired infinity vertices), there must be edges between such pairs of vertices, at least one of which must contain a multicolor matching the multicolor of the supplied kbreak. Edges of the multicolor specified in the kbreak are deleted between supplied pairs of vertices in kbreak.start_edges (except for paired infinity vertices). New edges of the multicolor specified in the kbreak are added between all pairs of vertices in kbreak.result_edges (except for paired infinity vertices). If after the kbreak application there is an infinity vertex that now has no edges incident to it, it is deleted from the current :class:`BreakpointGraph`. :param kbreak: a k-break to be applied to the current :class:`BreakpointGraph` :type kbreak: `bg.kbreak.KBreak` :param merge: a flag indicating how edges created by the k-break are added to the current :class:`BreakpointGraph` :type merge: ``Boolean`` :return: nothing, performs inplace changes :rtype: ``None`` :raises: ``ValueError``, ``TypeError``
[ "Check", "validity", "of", "supplied", "k", "-", "break", "and", "then", "applies", "it", "to", "current", ":", "class", ":", "BreakpointGraph" ]
python
train
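Stripped of multicolors, infinity vertices, and edge data, the core of a k-break is an edge swap; a toy standalone 2-break on a plain edge set (the vertex names are made up, and this is an illustration of the operation, not the bg API):

# The two start edges are deleted and replaced by the result edges,
# which re-pair the same four vertex ends.
start_edges = {frozenset({"a_h", "b_t"}), frozenset({"c_h", "d_t"})}
result_edges = {frozenset({"a_h", "c_h"}), frozenset({"b_t", "d_t"})}

graph_edges = {frozenset({"a_h", "b_t"}), frozenset({"c_h", "d_t"}),
               frozenset({"b_h", "c_t"})}
assert start_edges <= graph_edges  # every targeted edge must exist (cf. the checks above)
graph_edges = (graph_edges - start_edges) | result_edges
print(sorted(sorted(e) for e in graph_edges))
# [['a_h', 'c_h'], ['b_h', 'c_t'], ['b_t', 'd_t']]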
openstack/monasca-common
monasca_common/kafka_lib/consumer/kafka.py
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/consumer/kafka.py#L521-L586
def commit(self):
    """Store consumed message offsets (marked via task_done())
    to kafka cluster for this consumer_group.

    Returns:
        True on success, or False if no offsets were found for commit

    Note:
        this functionality requires server version >=0.8.1.1
        https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
    """
    if not self._config['group_id']:
        logger.warning('Cannot commit without a group_id!')
        raise KafkaConfigurationError(
            'Attempted to commit offsets '
            'without a configured consumer group (group_id)'
        )

    # API supports storing metadata with each commit
    # but for now it is unused
    metadata = b''

    offsets = self._offsets.task_done
    commits = []
    for topic_partition, task_done_offset in six.iteritems(offsets):
        # Skip if None
        if task_done_offset is None:
            continue

        # Commit offsets as the next offset to fetch
        # which is consistent with the Java Client
        # task_done is marked by messages consumed,
        # so add one to mark the next message for fetching
        commit_offset = (task_done_offset + 1)

        # Skip if no change from previous committed
        if commit_offset == self._offsets.commit[topic_partition]:
            continue

        commits.append(
            OffsetCommitRequest(topic_partition[0], topic_partition[1],
                                commit_offset, metadata)
        )

    if commits:
        logger.info('committing consumer offsets to group %s',
                    self._config['group_id'])
        resps = self._client.send_offset_commit_request(
            kafka_bytestring(self._config['group_id']), commits,
            fail_on_error=False
        )

        for r in resps:
            check_error(r)
            topic_partition = (r.topic, r.partition)
            task_done = self._offsets.task_done[topic_partition]
            self._offsets.commit[topic_partition] = (task_done + 1)
            if self._config['auto_commit_enable']:
                self._reset_auto_commit()

        return True

    else:
        logger.info('No new offsets found to commit in group %s',
                    self._config['group_id'])
        return False
[ "def", "commit", "(", "self", ")", ":", "if", "not", "self", ".", "_config", "[", "'group_id'", "]", ":", "logger", ".", "warning", "(", "'Cannot commit without a group_id!'", ")", "raise", "KafkaConfigurationError", "(", "'Attempted to commit offsets '", "'without a configured consumer group (group_id)'", ")", "# API supports storing metadata with each commit", "# but for now it is unused", "metadata", "=", "b''", "offsets", "=", "self", ".", "_offsets", ".", "task_done", "commits", "=", "[", "]", "for", "topic_partition", ",", "task_done_offset", "in", "six", ".", "iteritems", "(", "offsets", ")", ":", "# Skip if None", "if", "task_done_offset", "is", "None", ":", "continue", "# Commit offsets as the next offset to fetch", "# which is consistent with the Java Client", "# task_done is marked by messages consumed,", "# so add one to mark the next message for fetching", "commit_offset", "=", "(", "task_done_offset", "+", "1", ")", "# Skip if no change from previous committed", "if", "commit_offset", "==", "self", ".", "_offsets", ".", "commit", "[", "topic_partition", "]", ":", "continue", "commits", ".", "append", "(", "OffsetCommitRequest", "(", "topic_partition", "[", "0", "]", ",", "topic_partition", "[", "1", "]", ",", "commit_offset", ",", "metadata", ")", ")", "if", "commits", ":", "logger", ".", "info", "(", "'committing consumer offsets to group %s'", ",", "self", ".", "_config", "[", "'group_id'", "]", ")", "resps", "=", "self", ".", "_client", ".", "send_offset_commit_request", "(", "kafka_bytestring", "(", "self", ".", "_config", "[", "'group_id'", "]", ")", ",", "commits", ",", "fail_on_error", "=", "False", ")", "for", "r", "in", "resps", ":", "check_error", "(", "r", ")", "topic_partition", "=", "(", "r", ".", "topic", ",", "r", ".", "partition", ")", "task_done", "=", "self", ".", "_offsets", ".", "task_done", "[", "topic_partition", "]", "self", ".", "_offsets", ".", "commit", "[", "topic_partition", "]", "=", "(", "task_done", "+", "1", ")", "if", "self", ".", "_config", "[", "'auto_commit_enable'", "]", ":", "self", ".", "_reset_auto_commit", "(", ")", "return", "True", "else", ":", "logger", ".", "info", "(", "'No new offsets found to commit in group %s'", ",", "self", ".", "_config", "[", "'group_id'", "]", ")", "return", "False" ]
Store consumed message offsets (marked via task_done()) to kafka cluster for this consumer_group. Returns: True on success, or False if no offsets were found for commit Note: this functionality requires server version >=0.8.1.1 https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
[ "Store", "consumed", "message", "offsets", "(", "marked", "via", "task_done", "()", ")", "to", "kafka", "cluster", "for", "this", "consumer_group", "." ]
python
train
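A hedged usage sketch for this 0.9-era consumer API; the topic, group, and broker address are hypothetical, and the exact config key names vary between versions of this vendored client:

from monasca_common.kafka_lib.consumer.kafka import KafkaConsumer

consumer = KafkaConsumer('my-topic',                      # hypothetical topic
                         group_id='my-group',             # required by commit()
                         bootstrap_servers=['localhost:9092'],
                         auto_commit_enable=False)
for message in consumer:
    print(message)               # stand-in for real processing
    consumer.task_done(message)  # mark the offset as processed
    consumer.commit()            # push task_done offsets to the cluster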
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Script/SConscript.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Script/SConscript.py#L462-L477
def EnsureSConsVersion(self, major, minor, revision=0):
    """Exit abnormally if the SCons version is not late enough."""
    # split string to avoid replacement during build process
    if SCons.__version__ == '__' + 'VERSION__':
        SCons.Warnings.warn(SCons.Warnings.DevelopmentVersionWarning,
                            "EnsureSConsVersion is ignored for development version")
        return
    scons_ver = self._get_major_minor_revision(SCons.__version__)
    if scons_ver < (major, minor, revision):
        if revision:
            scons_ver_string = '%d.%d.%d' % (major, minor, revision)
        else:
            scons_ver_string = '%d.%d' % (major, minor)
        print("SCons %s or greater required, but you have SCons %s" %
              (scons_ver_string, SCons.__version__))
        sys.exit(2)
[ "def", "EnsureSConsVersion", "(", "self", ",", "major", ",", "minor", ",", "revision", "=", "0", ")", ":", "# split string to avoid replacement during build process", "if", "SCons", ".", "__version__", "==", "'__'", "+", "'VERSION__'", ":", "SCons", ".", "Warnings", ".", "warn", "(", "SCons", ".", "Warnings", ".", "DevelopmentVersionWarning", ",", "\"EnsureSConsVersion is ignored for development version\"", ")", "return", "scons_ver", "=", "self", ".", "_get_major_minor_revision", "(", "SCons", ".", "__version__", ")", "if", "scons_ver", "<", "(", "major", ",", "minor", ",", "revision", ")", ":", "if", "revision", ":", "scons_ver_string", "=", "'%d.%d.%d'", "%", "(", "major", ",", "minor", ",", "revision", ")", "else", ":", "scons_ver_string", "=", "'%d.%d'", "%", "(", "major", ",", "minor", ")", "print", "(", "\"SCons %s or greater required, but you have SCons %s\"", "%", "(", "scons_ver_string", ",", "SCons", ".", "__version__", ")", ")", "sys", ".", "exit", "(", "2", ")" ]
Exit abnormally if the SCons version is not late enough.
[ "Exit", "abnormally", "if", "the", "SCons", "version", "is", "not", "late", "enough", "." ]
python
train
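In an SConstruct this is called as a global function; the 3.0 floor below is just an example:

# SConstruct
EnsureSConsVersion(3, 0)   # aborts with exit code 2 on older SCons
env = Environment()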
nwilming/ocupy
ocupy/parallel.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L228-L245
def partition(self):
    """Partitions all tasks into groups of tasks. A group is
    represented by a task_store object that indexes a subset
    of tasks."""
    step = int(math.ceil(self.num_tasks / float(self.partitions)))
    if self.indices is None:
        slice_ind = list(range(0, self.num_tasks, step))
        for start in slice_ind:
            yield self.__class__(self.partitions,
                                 list(range(start, start + step)))
    else:
        slice_ind = list(range(0, len(self.indices), step))
        for start in slice_ind:
            if start + step <= len(self.indices):
                yield self.__class__(self.partitions,
                                     self.indices[start: start + step])
            else:
                yield self.__class__(self.partitions,
                                     self.indices[start:])
[ "def", "partition", "(", "self", ")", ":", "step", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "num_tasks", "/", "float", "(", "self", ".", "partitions", ")", ")", ")", "if", "self", ".", "indices", "==", "None", ":", "slice_ind", "=", "list", "(", "range", "(", "0", ",", "self", ".", "num_tasks", ",", "step", ")", ")", "for", "start", "in", "slice_ind", ":", "yield", "self", ".", "__class__", "(", "self", ".", "partitions", ",", "list", "(", "range", "(", "start", ",", "start", "+", "step", ")", ")", ")", "else", ":", "slice_ind", "=", "list", "(", "range", "(", "0", ",", "len", "(", "self", ".", "indices", ")", ",", "step", ")", ")", "for", "start", "in", "slice_ind", ":", "if", "start", "+", "step", "<=", "len", "(", "self", ".", "indices", ")", ":", "yield", "self", ".", "__class__", "(", "self", ".", "partitions", ",", "self", ".", "indices", "[", "start", ":", "start", "+", "step", "]", ")", "else", ":", "yield", "self", ".", "__class__", "(", "self", ".", "partitions", ",", "self", ".", "indices", "[", "start", ":", "]", ")" ]
Partitions all tasks into groups of tasks. A group is represented by a task_store object that indexes a subset of tasks.
[ "Partitions", "all", "tasks", "into", "groups", "of", "tasks", ".", "A", "group", "is", "represented", "by", "a", "task_store", "object", "that", "indexes", "a", "sub", "-", "set", "of", "tasks", "." ]
python
train
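The chunking arithmetic is ordinary ceiling division; a standalone check with made-up numbers (the last slice is clamped here for clarity):

import math

num_tasks, partitions = 10, 3
step = int(math.ceil(num_tasks / float(partitions)))  # ceil(10/3) = 4
print(step)  # 4
print([list(range(s, min(s + step, num_tasks)))
       for s in range(0, num_tasks, step)])
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]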
cloudant/python-cloudant
src/cloudant/feed.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/feed.py#L109-L135
def _validate(self, key, val, arg_types):
    """
    Ensures that the key and the value are valid arguments to be used with
    the feed.
    """
    if key in arg_types:
        arg_type = arg_types[key]
    else:
        if ANY_ARG not in arg_types:
            raise CloudantArgumentError(116, key)
        arg_type = arg_types[ANY_ARG]
    if arg_type == ANY_TYPE:
        return
    if (not isinstance(val, arg_type) or
            (isinstance(val, bool) and int in arg_type)):
        raise CloudantArgumentError(117, key, arg_type)
    if isinstance(val, int) and val < 0 and not isinstance(val, bool):
        raise CloudantArgumentError(118, key, val)
    if key == 'feed':
        valid_vals = ('continuous', 'normal', 'longpoll')
        if self._source == 'CouchDB':
            valid_vals = ('continuous', 'longpoll')
        if val not in valid_vals:
            raise CloudantArgumentError(119, val, valid_vals)
    if key == 'style' and val not in ('main_only', 'all_docs'):
        raise CloudantArgumentError(120, val)
[ "def", "_validate", "(", "self", ",", "key", ",", "val", ",", "arg_types", ")", ":", "if", "key", "in", "arg_types", ":", "arg_type", "=", "arg_types", "[", "key", "]", "else", ":", "if", "ANY_ARG", "not", "in", "arg_types", ":", "raise", "CloudantArgumentError", "(", "116", ",", "key", ")", "arg_type", "=", "arg_types", "[", "ANY_ARG", "]", "if", "arg_type", "==", "ANY_TYPE", ":", "return", "if", "(", "not", "isinstance", "(", "val", ",", "arg_type", ")", "or", "(", "isinstance", "(", "val", ",", "bool", ")", "and", "int", "in", "arg_type", ")", ")", ":", "raise", "CloudantArgumentError", "(", "117", ",", "key", ",", "arg_type", ")", "if", "isinstance", "(", "val", ",", "int", ")", "and", "val", "<", "0", "and", "not", "isinstance", "(", "val", ",", "bool", ")", ":", "raise", "CloudantArgumentError", "(", "118", ",", "key", ",", "val", ")", "if", "key", "==", "'feed'", ":", "valid_vals", "=", "(", "'continuous'", ",", "'normal'", ",", "'longpoll'", ")", "if", "self", ".", "_source", "==", "'CouchDB'", ":", "valid_vals", "=", "(", "'continuous'", ",", "'longpoll'", ")", "if", "val", "not", "in", "valid_vals", ":", "raise", "CloudantArgumentError", "(", "119", ",", "val", ",", "valid_vals", ")", "if", "key", "==", "'style'", "and", "val", "not", "in", "(", "'main_only'", ",", "'all_docs'", ")", ":", "raise", "CloudantArgumentError", "(", "120", ",", "val", ")" ]
Ensures that the key and the value are valid arguments to be used with the feed.
[ "Ensures", "that", "the", "key", "and", "the", "value", "are", "valid", "arguments", "to", "be", "used", "with", "the", "feed", "." ]
python
train
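The seemingly redundant bool checks guard a real Python pitfall: bool is a subclass of int, so without them True/False would slip through integer type and range tests:

print(isinstance(True, int))   # True  -- bool passes an int isinstance test
print(int(True), int(False))   # 1 0
print(True < 0)                # False -- the negative-int check alone would not reject it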
jdoda/sdl2hl
sdl2hl/renderer.py
https://github.com/jdoda/sdl2hl/blob/3b477e1e01cea5d8e15e9e5ef3a302ea460f5946/sdl2hl/renderer.py#L372-L376
def w(self):
    """int: The width of the texture in pixels."""
    w = ffi.new('int *')
    check_int_err(lib.SDL_QueryTexture(self._ptr, ffi.NULL, ffi.NULL, w, ffi.NULL))
    return w[0]
[ "def", "w", "(", "self", ")", ":", "w", "=", "ffi", ".", "new", "(", "'int *'", ")", "check_int_err", "(", "lib", ".", "SDL_QueryTexture", "(", "self", ".", "_ptr", ",", "ffi", ".", "NULL", ",", "ffi", ".", "NULL", ",", "w", ",", "ffi", ".", "NULL", ")", ")", "return", "w", "[", "0", "]" ]
int: The width of the texture in pixels.
[ "int", ":", "The", "width", "of", "the", "texture", "in", "pixels", "." ]
python
train
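The ffi.new('int *') call is the classic cffi out-parameter pattern; a standalone sketch with a made-up value standing in for SDL:

from cffi import FFI

ffi = FFI()
w = ffi.new('int *')  # freshly allocated C int, zero-initialized
w[0] = 640            # a C callee such as SDL_QueryTexture writes through the pointer
print(w[0])           # 640 -- the Python side reads the result back at index 0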
vanheeringen-lab/gimmemotifs
gimmemotifs/denovo.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/denovo.py#L185-L273
def create_background(bg_type, fafile, outfile, genome="hg18", width=200,
                      nr_times=10, custom_background=None):
    """Create background of a specific type.

    Parameters
    ----------
    bg_type : str
        Name of background type.
    fafile : str
        Name of input FASTA file.
    outfile : str
        Name of output FASTA file.
    genome : str, optional
        Genome name.
    width : int, optional
        Size of regions.
    nr_times : int, optional
        Generate this times as many background sequences as compared to
        input file.

    Returns
    -------
    nr_seqs : int
        Number of sequences created.
    """
    width = int(width)
    config = MotifConfig()
    fg = Fasta(fafile)

    if bg_type in ["genomic", "gc"]:
        if not genome:
            logger.error("Need a genome to create background")
            sys.exit(1)

    if bg_type == "random":
        f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
        logger.debug("Random background: %s", outfile)
    elif bg_type == "genomic":
        logger.debug("Creating genomic background")
        f = RandomGenomicFasta(genome, width, nr_times * len(fg))
    elif bg_type == "gc":
        logger.debug("Creating GC matched background")
        f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
        logger.debug("GC matched background: %s", outfile)
    elif bg_type == "promoter":
        fname = Genome(genome).filename
        gene_file = fname.replace(".fa", ".annotation.bed.gz")
        if not gene_file:
            gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
        if not os.path.exists(gene_file):
            print("Could not find a gene file for genome {}".format(genome))
            print("Did you use the --annotation flag for genomepy?")
            print("Alternatively make sure there is a file called {}.bed in {}".format(
                genome, config.get_gene_dir()))
            raise ValueError()

        logger.info(
            "Creating random promoter background (%s, using genes in %s)",
            genome, gene_file)
        f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
        logger.debug("Random promoter background: %s", outfile)
    elif bg_type == "custom":
        bg_file = custom_background
        if not bg_file:
            raise IOError("Background file not specified!")
        if not os.path.exists(bg_file):
            raise IOError("Custom background file %s does not exist!" % bg_file)
        else:
            logger.info("Copying custom background file %s to %s.",
                        bg_file, outfile)
            f = Fasta(bg_file)
            l = np.median([len(seq) for seq in f.seqs])
            if l < (width * 0.95) or l > (width * 1.05):
                logger.warn(
                    "The custom background file %s contains sequences with a "
                    "median length of %s, while GimmeMotifs predicts motifs in "
                    "sequences of length %s. This will influence the statistics! "
                    "It is recommended to use background sequences of the same "
                    "length.", bg_file, l, width)

    f.writefasta(outfile)
    return len(f)
[ "def", "create_background", "(", "bg_type", ",", "fafile", ",", "outfile", ",", "genome", "=", "\"hg18\"", ",", "width", "=", "200", ",", "nr_times", "=", "10", ",", "custom_background", "=", "None", ")", ":", "width", "=", "int", "(", "width", ")", "config", "=", "MotifConfig", "(", ")", "fg", "=", "Fasta", "(", "fafile", ")", "if", "bg_type", "in", "[", "\"genomic\"", ",", "\"gc\"", "]", ":", "if", "not", "genome", ":", "logger", ".", "error", "(", "\"Need a genome to create background\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "bg_type", "==", "\"random\"", ":", "f", "=", "MarkovFasta", "(", "fg", ",", "k", "=", "1", ",", "n", "=", "nr_times", "*", "len", "(", "fg", ")", ")", "logger", ".", "debug", "(", "\"Random background: %s\"", ",", "outfile", ")", "elif", "bg_type", "==", "\"genomic\"", ":", "logger", ".", "debug", "(", "\"Creating genomic background\"", ")", "f", "=", "RandomGenomicFasta", "(", "genome", ",", "width", ",", "nr_times", "*", "len", "(", "fg", ")", ")", "elif", "bg_type", "==", "\"gc\"", ":", "logger", ".", "debug", "(", "\"Creating GC matched background\"", ")", "f", "=", "MatchedGcFasta", "(", "fafile", ",", "genome", ",", "nr_times", "*", "len", "(", "fg", ")", ")", "logger", ".", "debug", "(", "\"GC matched background: %s\"", ",", "outfile", ")", "elif", "bg_type", "==", "\"promoter\"", ":", "fname", "=", "Genome", "(", "genome", ")", ".", "filename", "gene_file", "=", "fname", ".", "replace", "(", "\".fa\"", ",", "\".annotation.bed.gz\"", ")", "if", "not", "gene_file", ":", "gene_file", "=", "os", ".", "path", ".", "join", "(", "config", ".", "get_gene_dir", "(", ")", ",", "\"%s.bed\"", "%", "genome", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "gene_file", ")", ":", "print", "(", "\"Could not find a gene file for genome {}\"", ")", "print", "(", "\"Did you use the --annotation flag for genomepy?\"", ")", "print", "(", "\"Alternatively make sure there is a file called {}.bed in {}\"", ".", "format", "(", "genome", ",", "config", ".", "get_gene_dir", "(", ")", ")", ")", "raise", "ValueError", "(", ")", "logger", ".", "info", "(", "\"Creating random promoter background (%s, using genes in %s)\"", ",", "genome", ",", "gene_file", ")", "f", "=", "PromoterFasta", "(", "gene_file", ",", "genome", ",", "width", ",", "nr_times", "*", "len", "(", "fg", ")", ")", "logger", ".", "debug", "(", "\"Random promoter background: %s\"", ",", "outfile", ")", "elif", "bg_type", "==", "\"custom\"", ":", "bg_file", "=", "custom_background", "if", "not", "bg_file", ":", "raise", "IOError", "(", "\"Background file not specified!\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "bg_file", ")", ":", "raise", "IOError", "(", "\"Custom background file %s does not exist!\"", ",", "bg_file", ")", "else", ":", "logger", ".", "info", "(", "\"Copying custom background file %s to %s.\"", ",", "bg_file", ",", "outfile", ")", "f", "=", "Fasta", "(", "bg_file", ")", "l", "=", "np", ".", "median", "(", "[", "len", "(", "seq", ")", "for", "seq", "in", "f", ".", "seqs", "]", ")", "if", "l", "<", "(", "width", "*", "0.95", ")", "or", "l", ">", "(", "width", "*", "1.05", ")", ":", "logger", ".", "warn", "(", "\"The custom background file %s contains sequences with a \"", "\"median length of %s, while GimmeMotifs predicts motifs in sequences \"", "\"of length %s. This will influence the statistics! 
It is recommended \"", "\"to use background sequences of the same length.\"", ",", "bg_file", ",", "l", ",", "width", ")", "f", ".", "writefasta", "(", "outfile", ")", "return", "len", "(", "f", ")" ]
Create background of a specific type. Parameters ---------- bg_type : str Name of background type. fafile : str Name of input FASTA file. outfile : str Name of output FASTA file. genome : str, optional Genome name. width : int, optional Size of regions. nr_times : int, optional Generate this times as many background sequences as compared to input file. Returns ------- nr_seqs : int Number of sequences created.
[ "Create", "background", "of", "a", "specific", "type", "." ]
python
train
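An example call using the signature above; the file names and genome are hypothetical, and the input FASTA must exist on disk:

n = create_background("gc", "peaks.fa", "bg.fa",       # hypothetical files
                      genome="hg38", width=200, nr_times=10)
print("wrote %d background sequences" % n)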
pmorissette/ffn
ffn/core.py
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L1986-L2031
def plot_heatmap(data, title='Heatmap', show_legend=True,
                 show_labels=True, label_fmt='.2f',
                 vmin=None, vmax=None, figsize=None,
                 label_color='w', cmap='RdBu', **kwargs):
    """
    Plot a heatmap using matplotlib's pcolor.

    Args:
        * data (DataFrame): DataFrame to plot. Usually small matrix (ex.
            correlation matrix).
        * title (string): Plot title
        * show_legend (bool): Show color legend
        * show_labels (bool): Show value labels
        * label_fmt (str): Label format string
        * vmin (float): Min value for scale
        * vmax (float): Max value for scale
        * cmap (string): Color map
        * kwargs: Passed to matplotlib's pcolor

    """
    fig, ax = plt.subplots(figsize=figsize)

    heatmap = ax.pcolor(data, vmin=vmin, vmax=vmax, cmap=cmap)
    # for some reason heatmap has the y values backwards....
    ax.invert_yaxis()

    if title is not None:
        plt.title(title)

    if show_legend:
        fig.colorbar(heatmap)

    if show_labels:
        vals = data.values
        for x in range(data.shape[0]):
            for y in range(data.shape[1]):
                plt.text(x + 0.5, y + 0.5, format(vals[y, x], label_fmt),
                         horizontalalignment='center',
                         verticalalignment='center',
                         color=label_color)

    plt.yticks(np.arange(0.5, len(data.index), 1), data.index)
    plt.xticks(np.arange(0.5, len(data.columns), 1), data.columns)

    return plt
[ "def", "plot_heatmap", "(", "data", ",", "title", "=", "'Heatmap'", ",", "show_legend", "=", "True", ",", "show_labels", "=", "True", ",", "label_fmt", "=", "'.2f'", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "figsize", "=", "None", ",", "label_color", "=", "'w'", ",", "cmap", "=", "'RdBu'", ",", "*", "*", "kwargs", ")", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "heatmap", "=", "ax", ".", "pcolor", "(", "data", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ",", "cmap", "=", "cmap", ")", "# for some reason heatmap has the y values backwards....", "ax", ".", "invert_yaxis", "(", ")", "if", "title", "is", "not", "None", ":", "plt", ".", "title", "(", "title", ")", "if", "show_legend", ":", "fig", ".", "colorbar", "(", "heatmap", ")", "if", "show_labels", ":", "vals", "=", "data", ".", "values", "for", "x", "in", "range", "(", "data", ".", "shape", "[", "0", "]", ")", ":", "for", "y", "in", "range", "(", "data", ".", "shape", "[", "1", "]", ")", ":", "plt", ".", "text", "(", "x", "+", "0.5", ",", "y", "+", "0.5", ",", "format", "(", "vals", "[", "y", ",", "x", "]", ",", "label_fmt", ")", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "color", "=", "label_color", ")", "plt", ".", "yticks", "(", "np", ".", "arange", "(", "0.5", ",", "len", "(", "data", ".", "index", ")", ",", "1", ")", ",", "data", ".", "index", ")", "plt", ".", "xticks", "(", "np", ".", "arange", "(", "0.5", ",", "len", "(", "data", ".", "columns", ")", ",", "1", ")", ",", "data", ".", "columns", ")", "return", "plt" ]
Plot a heatmap using matplotlib's pcolor. Args: * data (DataFrame): DataFrame to plot. Usually small matrix (ex. correlation matrix). * title (string): Plot title * show_legend (bool): Show color legend * show_labels (bool): Show value labels * label_fmt (str): Label format string * vmin (float): Min value for scale * vmax (float): Max value for scale * cmap (string): Color map * kwargs: Passed to matplotlib's pcolor
[ "Plot", "a", "heatmap", "using", "matplotlib", "s", "pcolor", "." ]
python
train
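A minimal usage sketch for plot_heatmap above, assuming ffn is installed and the function is importable from ffn.core (the path given in the record); the DataFrame and column names are invented for the example.

import numpy as np
import pandas as pd
from ffn.core import plot_heatmap  # import path inferred from the record above

# Render the correlation matrix of some toy returns; plot_heatmap returns
# the pyplot module, so show() can be called on the result.
rets = pd.DataFrame(np.random.randn(250, 3), columns=['spy', 'agg', 'gld'])
plt = plot_heatmap(rets.corr(), title='Return correlations', label_fmt='.2f')
plt.show()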
GeorgeArgyros/symautomata
symautomata/cfggenerator.py
https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/cfggenerator.py#L175-L187
def generate(self): """ Generates a new random string from the start symbol Args: None Returns: str: The generated string """ result = self._gen(self.optimized, self.splitstring) if self.splitstring and result is not None: result = result[1:] return result
[ "def", "generate", "(", "self", ")", ":", "result", "=", "self", ".", "_gen", "(", "self", ".", "optimized", ",", "self", ".", "splitstring", ")", "if", "self", ".", "splitstring", "and", "result", "is", "not", "None", ":", "result", "=", "result", "[", "1", ":", "]", "return", "result" ]
Generates a new random string from the start symbol Args: None Returns: str: The generated string
[ "Generates", "a", "new", "random", "string", "from", "the", "start", "symbol", "Args", ":", "None", "Returns", ":", "str", ":", "The", "generated", "string" ]
python
train
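The generate method above delegates to an internal _gen walk over the grammar. As a standalone illustration of the same idea (random expansion from a start symbol until only terminals remain), here is a self-contained sketch; the toy grammar and function name are invented for the example and are not part of symautomata.

import random

# Toy grammar: S -> a S b | c. Nonterminal symbols are dict keys;
# anything else is a terminal.
GRAMMAR = {'S': [['a', 'S', 'b'], ['c']]}

def generate(symbol='S'):
    if symbol not in GRAMMAR:
        return symbol  # terminal: emit as-is
    production = random.choice(GRAMMAR[symbol])
    return ''.join(generate(s) for s in production)

print(generate())  # e.g. 'aacbb' or 'c'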
mitsei/dlkit
dlkit/json_/assessment_authoring/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/objects.py#L369-L391
def _init_map(self, record_types=None, **kwargs): """Initialize form map""" osid_objects.OsidContainableForm._init_map(self) osid_objects.OsidOperableForm._init_map(self) osid_objects.OsidObjectForm._init_map(self, record_types=record_types) if 'assessment_part_id' in kwargs: self._my_map['assessmentPartId'] = str(kwargs['assessment_part_id']) if 'mdata' in kwargs: self._my_map['sequestered'] = kwargs['mdata']['sequestered']['default_boolean_values'][0] else: self._my_map['assessmentPartId'] = self._assessment_part_default self._my_map['sequestered'] = False # Parts under Assessments must be "Sections" if 'assessment_id' in kwargs: self._my_map['assessmentId'] = str(kwargs['assessment_id']) else: self._my_map['assessmentId'] = self._assessment_default self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])] self._my_map['allocatedTime'] = self._allocated_time_default self._my_map['itemsSequential'] = self._items_sequential_default self._my_map['itemsShuffled'] = self._items_shuffled_default self._my_map['weight'] = self._weight_default if self._supports_simple_sequencing(): self._my_map['childIds'] = []
[ "def", "_init_map", "(", "self", ",", "record_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osid_objects", ".", "OsidContainableForm", ".", "_init_map", "(", "self", ")", "osid_objects", ".", "OsidOperableForm", ".", "_init_map", "(", "self", ")", "osid_objects", ".", "OsidObjectForm", ".", "_init_map", "(", "self", ",", "record_types", "=", "record_types", ")", "if", "'assessment_part_id'", "in", "kwargs", ":", "self", ".", "_my_map", "[", "'assessmentPartId'", "]", "=", "str", "(", "kwargs", "[", "'assessment_part_id'", "]", ")", "if", "'mdata'", "in", "kwargs", ":", "self", ".", "_my_map", "[", "'sequestered'", "]", "=", "kwargs", "[", "'mdata'", "]", "[", "'sequestered'", "]", "[", "'default_boolean_values'", "]", "[", "0", "]", "else", ":", "self", ".", "_my_map", "[", "'assessmentPartId'", "]", "=", "self", ".", "_assessment_part_default", "self", ".", "_my_map", "[", "'sequestered'", "]", "=", "False", "# Parts under Assessments must be \"Sections\"", "if", "'assessment_id'", "in", "kwargs", ":", "self", ".", "_my_map", "[", "'assessmentId'", "]", "=", "str", "(", "kwargs", "[", "'assessment_id'", "]", ")", "else", ":", "self", ".", "_my_map", "[", "'assessmentId'", "]", "=", "self", ".", "_assessment_default", "self", ".", "_my_map", "[", "'assignedBankIds'", "]", "=", "[", "str", "(", "kwargs", "[", "'bank_id'", "]", ")", "]", "self", ".", "_my_map", "[", "'allocatedTime'", "]", "=", "self", ".", "_allocated_time_default", "self", ".", "_my_map", "[", "'itemsSequential'", "]", "=", "self", ".", "_items_sequential_default", "self", ".", "_my_map", "[", "'itemsShuffled'", "]", "=", "self", ".", "_items_shuffled_default", "self", ".", "_my_map", "[", "'weight'", "]", "=", "self", ".", "_weight_default", "if", "self", ".", "_supports_simple_sequencing", "(", ")", ":", "self", ".", "_my_map", "[", "'childIds'", "]", "=", "[", "]" ]
Initialize form map
[ "Initialize", "form", "map" ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/sip/ip_access_control_list/ip_address.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/ip_access_control_list/ip_address.py#L472-L488
def update(self, ip_address=values.unset, friendly_name=values.unset, cidr_prefix_length=values.unset): """ Update the IpAddressInstance :param unicode ip_address: An IP address in dotted decimal notation from which you want to accept traffic. Any SIP requests from this IP address will be allowed by Twilio. IPv4 only supported today. :param unicode friendly_name: A human readable descriptive text for this resource, up to 64 characters long. :param unicode cidr_prefix_length: An integer representing the length of the CIDR prefix to use with this IP address when accepting traffic. By default the entire IP address is used. :returns: Updated IpAddressInstance :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressInstance """ return self._proxy.update( ip_address=ip_address, friendly_name=friendly_name, cidr_prefix_length=cidr_prefix_length, )
[ "def", "update", "(", "self", ",", "ip_address", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "cidr_prefix_length", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "update", "(", "ip_address", "=", "ip_address", ",", "friendly_name", "=", "friendly_name", ",", "cidr_prefix_length", "=", "cidr_prefix_length", ",", ")" ]
Update the IpAddressInstance :param unicode ip_address: An IP address in dotted decimal notation from which you want to accept traffic. Any SIP requests from this IP address will be allowed by Twilio. IPv4 only supported today. :param unicode friendly_name: A human readable descriptive text for this resource, up to 64 characters long. :param unicode cidr_prefix_length: An integer representing the length of the CIDR prefix to use with this IP address when accepting traffic. By default the entire IP address is used. :returns: Updated IpAddressInstance :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressInstance
[ "Update", "the", "IpAddressInstance" ]
python
train
sunlightlabs/django-locksmith
locksmith/hub/models.py
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/models.py#L82-L87
def mark_for_update(self): ''' Note that a change has been made so all Statuses need update ''' self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE) push_key.delay(self)
[ "def", "mark_for_update", "(", "self", ")", ":", "self", ".", "pub_statuses", ".", "exclude", "(", "status", "=", "UNPUBLISHED", ")", ".", "update", "(", "status", "=", "NEEDS_UPDATE", ")", "push_key", ".", "delay", "(", "self", ")" ]
Note that a change has been made so all Statuses need update
[ "Note", "that", "a", "change", "has", "been", "made", "so", "all", "Statuses", "need", "update" ]
python
train
llazzaro/django-scheduler
schedule/views.py
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/views.py#L251-L274
def get_occurrence(event_id, occurrence_id=None, year=None, month=None, day=None, hour=None, minute=None, second=None, tzinfo=None): """ Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if it's persisted the occurrence can be retrieved with an id. If it is not persisted, it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used. """ if(occurrence_id): occurrence = get_object_or_404(Occurrence, id=occurrence_id) event = occurrence.event elif None not in (year, month, day, hour, minute, second): event = get_object_or_404(Event, id=event_id) date = timezone.make_aware(datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)), tzinfo) occurrence = event.get_occurrence(date) if occurrence is None: raise Http404 else: raise Http404 return event, occurrence
[ "def", "get_occurrence", "(", "event_id", ",", "occurrence_id", "=", "None", ",", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hour", "=", "None", ",", "minute", "=", "None", ",", "second", "=", "None", ",", "tzinfo", "=", "None", ")", ":", "if", "(", "occurrence_id", ")", ":", "occurrence", "=", "get_object_or_404", "(", "Occurrence", ",", "id", "=", "occurrence_id", ")", "event", "=", "occurrence", ".", "event", "elif", "None", "not", "in", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ")", ":", "event", "=", "get_object_or_404", "(", "Event", ",", "id", "=", "event_id", ")", "date", "=", "timezone", ".", "make_aware", "(", "datetime", ".", "datetime", "(", "int", "(", "year", ")", ",", "int", "(", "month", ")", ",", "int", "(", "day", ")", ",", "int", "(", "hour", ")", ",", "int", "(", "minute", ")", ",", "int", "(", "second", ")", ")", ",", "tzinfo", ")", "occurrence", "=", "event", ".", "get_occurrence", "(", "date", ")", "if", "occurrence", "is", "None", ":", "raise", "Http404", "else", ":", "raise", "Http404", "return", "event", ",", "occurrence" ]
Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if it's persisted the occurrence can be retrieved with an id. If it is not persisted, it takes a date to retrieve it. This function returns an event and occurrence regardless of which method is used.
[ "Because", "occurrences", "don", "t", "have", "to", "be", "persisted", "there", "must", "be", "two", "ways", "to", "retrieve", "them", ".", "both", "need", "an", "event", "but", "if", "its", "persisted", "the", "occurrence", "can", "be", "retrieved", "with", "an", "id", ".", "If", "it", "is", "not", "persisted", "it", "takes", "a", "date", "to", "retrieve", "it", ".", "This", "function", "returns", "an", "event", "and", "occurrence", "regardless", "of", "which", "method", "is", "used", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/assessment_utilities.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/assessment_utilities.py#L46-L56
def get_first_part_id_for_assessment(assessment_id, runtime=None, proxy=None, create=False, bank_id=None): """Gets the first part id, which represents the first section, of assessment""" if create and bank_id is None: raise NullArgument('Bank Id must be provided for create option') try: return get_next_part_id(assessment_id, runtime, proxy, sequestered=False)[0] except IllegalState: if create: return create_first_assessment_section(assessment_id, runtime, proxy, bank_id) else: raise
[ "def", "get_first_part_id_for_assessment", "(", "assessment_id", ",", "runtime", "=", "None", ",", "proxy", "=", "None", ",", "create", "=", "False", ",", "bank_id", "=", "None", ")", ":", "if", "create", "and", "bank_id", "is", "None", ":", "raise", "NullArgument", "(", "'Bank Id must be provided for create option'", ")", "try", ":", "return", "get_next_part_id", "(", "assessment_id", ",", "runtime", ",", "proxy", ",", "sequestered", "=", "False", ")", "[", "0", "]", "except", "IllegalState", ":", "if", "create", ":", "return", "create_first_assessment_section", "(", "assessment_id", ",", "runtime", ",", "proxy", ",", "bank_id", ")", "else", ":", "raise" ]
Gets the first part id, which represents the first section, of assessment
[ "Gets", "the", "first", "part", "id", "which", "represents", "the", "first", "section", "of", "assessment" ]
python
train
gmdzy2010/dingtalk_sdk_gmdzy2010
dingtalk_sdk_gmdzy2010/user_request.py
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/user_request.py#L30-L34
def get_userinfo(self): """Method to get current user's name, mobile, email, position and avatar.""" wanted_fields = ["name", "mobile", "orgEmail", "position", "avatar"] userinfo = {k: self.json_response.get(k, None) for k in wanted_fields} return userinfo
[ "def", "get_userinfo", "(", "self", ")", ":", "wanted_fields", "=", "[", "\"name\"", ",", "\"mobile\"", ",", "\"orgEmail\"", ",", "\"position\"", ",", "\"avatar\"", "]", "userinfo", "=", "{", "k", ":", "self", ".", "json_response", ".", "get", "(", "k", ",", "None", ")", "for", "k", "in", "wanted_fields", "}", "return", "userinfo" ]
Method to get current user's name, mobile, email, position and avatar.
[ "Method", "to", "get", "current", "user", "s", "name", "mobile", "email", "and", "position", "." ]
python
train
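The field-selection pattern in get_userinfo works on any JSON-like dict; a standalone sketch with made-up response data:

# Pick a fixed set of keys from a response dict; dict.get defaults
# missing keys to None instead of raising KeyError.
json_response = {"name": "Alice", "mobile": "555-0100", "position": "PI"}
wanted_fields = ["name", "mobile", "orgEmail", "position", "avatar"]
userinfo = {k: json_response.get(k, None) for k in wanted_fields}
print(userinfo)  # 'orgEmail' and 'avatar' come back as None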
gvanderheide/discreteMarkovChain
discreteMarkovChain/markovChain.py
https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L194-L243
def indirectInitialMatrix(self, initialState): """ Given some initial state, this iteratively determines new states. We repeatedly call the transition function on unvisited states in the frontier set. Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary. """ mapping = {} rates = OrderedDict() #Check whether the initial state is defined and of the correct type, and convert to a tuple or int. convertedState = self.checkInitialState(initialState) if isinstance(convertedState,set): #If initialstates is a set, include all states in the set in the mapping. frontier = set( convertedState ) for idx,state in enumerate(convertedState): mapping[state] = idx if idx == 0: #Test the return type of the transition function (dict or numpy). usesNumpy = self.checkTransitionType(initialState) else: #Otherwise include only the single state. frontier = set( [convertedState] ) usesNumpy = self.checkTransitionType(initialState) mapping[convertedState] = 0 while len(frontier) > 0: fromstate = frontier.pop() fromindex = mapping[fromstate] if usesNumpy: #If numpy is used, convert to a dictionary with tuples and rates. transitions = self.transition(np.array(fromstate)) transitions = self.convertToTransitionDict(transitions) else: transitions = self.transition(fromstate) for tostate,rate in transitions.items(): if tostate not in mapping: frontier.add(tostate) mapping[tostate] = len(mapping) toindex = mapping[tostate] rates[(fromindex, toindex)] = rate #Inverse the keys and values in mapping to get a dictionary with indices and states. self.mapping = {value: key for key, value in list(mapping.items())} #Use the `rates` dictionary to fill a sparse dok matrix. D = dok_matrix((self.size,self.size)) D.update(rates) return D.tocsr()
[ "def", "indirectInitialMatrix", "(", "self", ",", "initialState", ")", ":", "mapping", "=", "{", "}", "rates", "=", "OrderedDict", "(", ")", "#Check whether the initial state is defined and of the correct type, and convert to a tuple or int. ", "convertedState", "=", "self", ".", "checkInitialState", "(", "initialState", ")", "if", "isinstance", "(", "convertedState", ",", "set", ")", ":", "#If initialstates is a set, include all states in the set in the mapping.", "frontier", "=", "set", "(", "convertedState", ")", "for", "idx", ",", "state", "in", "enumerate", "(", "convertedState", ")", ":", "mapping", "[", "state", "]", "=", "idx", "if", "idx", "==", "0", ":", "#Test the return type of the transition function (dict or numpy).", "usesNumpy", "=", "self", ".", "checkTransitionType", "(", "initialState", ")", "else", ":", "#Otherwise include only the single state.", "frontier", "=", "set", "(", "[", "convertedState", "]", ")", "usesNumpy", "=", "self", ".", "checkTransitionType", "(", "initialState", ")", "mapping", "[", "convertedState", "]", "=", "0", "while", "len", "(", "frontier", ")", ">", "0", ":", "fromstate", "=", "frontier", ".", "pop", "(", ")", "fromindex", "=", "mapping", "[", "fromstate", "]", "if", "usesNumpy", ":", "#If numpy is used, convert to a dictionary with tuples and rates.", "transitions", "=", "self", ".", "transition", "(", "np", ".", "array", "(", "fromstate", ")", ")", "transitions", "=", "self", ".", "convertToTransitionDict", "(", "transitions", ")", "else", ":", "transitions", "=", "self", ".", "transition", "(", "fromstate", ")", "for", "tostate", ",", "rate", "in", "transitions", ".", "items", "(", ")", ":", "if", "tostate", "not", "in", "mapping", ":", "frontier", ".", "add", "(", "tostate", ")", "mapping", "[", "tostate", "]", "=", "len", "(", "mapping", ")", "toindex", "=", "mapping", "[", "tostate", "]", "rates", "[", "(", "fromindex", ",", "toindex", ")", "]", "=", "rate", "#Inverse the keys and values in mapping to get a dictionary with indices and states.", "self", ".", "mapping", "=", "{", "value", ":", "key", "for", "key", ",", "value", "in", "list", "(", "mapping", ".", "items", "(", ")", ")", "}", "#Use the `rates` dictionary to fill a sparse dok matrix.", "D", "=", "dok_matrix", "(", "(", "self", ".", "size", ",", "self", ".", "size", ")", ")", "D", ".", "update", "(", "rates", ")", "return", "D", ".", "tocsr", "(", ")" ]
Given some initial state, this iteratively determines new states. We repeatedly call the transition function on unvisited states in the frontier set. Each newly visited state is put in a dictionary called 'mapping' and the rates are stored in a dictionary.
[ "Given", "some", "initial", "state", "this", "iteratively", "determines", "new", "states", ".", "We", "repeatedly", "call", "the", "transition", "function", "on", "unvisited", "states", "in", "the", "frontier", "set", ".", "Each", "newly", "visited", "state", "is", "put", "in", "a", "dictionary", "called", "mapping", "and", "the", "rates", "are", "stored", "in", "a", "dictionary", "." ]
python
train
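A usage sketch of the indirect approach this method implements, following the package's documented subclassing pattern (set initialState and define transition() returning {next_state: rate}); the reflecting random walk here is illustrative, and the solver/attribute names (computePi, pi) are taken from the package's README.

from discreteMarkovChain import markovChain

class RandomWalk(markovChain):
    """Reflecting random walk on the states {0, ..., 5}."""
    def __init__(self):
        super(RandomWalk, self).__init__()
        self.initialState = 0

    def transition(self, state):
        # Map each reachable neighbour state to its transition rate.
        rates = {}
        if state < 5:
            rates[state + 1] = 0.5
        if state > 0:
            rates[state - 1] = 0.5
        return rates

mc = RandomWalk()
mc.computePi('linear')  # explores the state space, then solves for pi
print(mc.pi)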
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L2980-L3060
def run(self, indent_size=DEFAULT_INDENT_SIZE): """Fix indentation and return modified line numbers. Line numbers are indexed at 1. """ if indent_size < 1: return self.input_text try: stats = _reindent_stats(tokenize.generate_tokens(self.getline)) except (SyntaxError, tokenize.TokenError): return self.input_text # Remove trailing empty lines. lines = self.lines # Sentinel. stats.append((len(lines), 0)) # Map count of leading spaces to # we want. have2want = {} # Program after transformation. after = [] # Copy over initial empty lines -- there's nothing to do until # we see a line with *something* on it. i = stats[0][0] after.extend(lines[1:i]) for i in range(len(stats) - 1): thisstmt, thislevel = stats[i] nextstmt = stats[i + 1][0] have = _leading_space_count(lines[thisstmt]) want = thislevel * indent_size if want < 0: # A comment line. if have: # An indented comment line. If we saw the same # indentation before, reuse what it most recently # mapped to. want = have2want.get(have, -1) if want < 0: # Then it probably belongs to the next real stmt. for j in range(i + 1, len(stats) - 1): jline, jlevel = stats[j] if jlevel >= 0: if have == _leading_space_count(lines[jline]): want = jlevel * indent_size break # Maybe it's a hanging comment like this one, if want < 0: # in which case we should shift it like its base # line got shifted. for j in range(i - 1, -1, -1): jline, jlevel = stats[j] if jlevel >= 0: want = (have + _leading_space_count( after[jline - 1]) - _leading_space_count(lines[jline])) break if want < 0: # Still no luck -- leave it alone. want = have else: want = 0 assert want >= 0 have2want[have] = want diff = want - have if diff == 0 or have == 0: after.extend(lines[thisstmt:nextstmt]) else: for line_number, line in enumerate(lines[thisstmt:nextstmt], start=thisstmt): if line_number in self.string_content_line_numbers: after.append(line) elif diff > 0: if line == '\n': after.append(line) else: after.append(' ' * diff + line) else: remove = min(_leading_space_count(line), -diff) after.append(line[remove:]) return ''.join(after)
[ "def", "run", "(", "self", ",", "indent_size", "=", "DEFAULT_INDENT_SIZE", ")", ":", "if", "indent_size", "<", "1", ":", "return", "self", ".", "input_text", "try", ":", "stats", "=", "_reindent_stats", "(", "tokenize", ".", "generate_tokens", "(", "self", ".", "getline", ")", ")", "except", "(", "SyntaxError", ",", "tokenize", ".", "TokenError", ")", ":", "return", "self", ".", "input_text", "# Remove trailing empty lines.", "lines", "=", "self", ".", "lines", "# Sentinel.", "stats", ".", "append", "(", "(", "len", "(", "lines", ")", ",", "0", ")", ")", "# Map count of leading spaces to # we want.", "have2want", "=", "{", "}", "# Program after transformation.", "after", "=", "[", "]", "# Copy over initial empty lines -- there's nothing to do until", "# we see a line with *something* on it.", "i", "=", "stats", "[", "0", "]", "[", "0", "]", "after", ".", "extend", "(", "lines", "[", "1", ":", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "stats", ")", "-", "1", ")", ":", "thisstmt", ",", "thislevel", "=", "stats", "[", "i", "]", "nextstmt", "=", "stats", "[", "i", "+", "1", "]", "[", "0", "]", "have", "=", "_leading_space_count", "(", "lines", "[", "thisstmt", "]", ")", "want", "=", "thislevel", "*", "indent_size", "if", "want", "<", "0", ":", "# A comment line.", "if", "have", ":", "# An indented comment line. If we saw the same", "# indentation before, reuse what it most recently", "# mapped to.", "want", "=", "have2want", ".", "get", "(", "have", ",", "-", "1", ")", "if", "want", "<", "0", ":", "# Then it probably belongs to the next real stmt.", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "stats", ")", "-", "1", ")", ":", "jline", ",", "jlevel", "=", "stats", "[", "j", "]", "if", "jlevel", ">=", "0", ":", "if", "have", "==", "_leading_space_count", "(", "lines", "[", "jline", "]", ")", ":", "want", "=", "jlevel", "*", "indent_size", "break", "# Maybe it's a hanging comment like this one,", "if", "want", "<", "0", ":", "# in which case we should shift it like its base", "# line got shifted.", "for", "j", "in", "range", "(", "i", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "jline", ",", "jlevel", "=", "stats", "[", "j", "]", "if", "jlevel", ">=", "0", ":", "want", "=", "(", "have", "+", "_leading_space_count", "(", "after", "[", "jline", "-", "1", "]", ")", "-", "_leading_space_count", "(", "lines", "[", "jline", "]", ")", ")", "break", "if", "want", "<", "0", ":", "# Still no luck -- leave it alone.", "want", "=", "have", "else", ":", "want", "=", "0", "assert", "want", ">=", "0", "have2want", "[", "have", "]", "=", "want", "diff", "=", "want", "-", "have", "if", "diff", "==", "0", "or", "have", "==", "0", ":", "after", ".", "extend", "(", "lines", "[", "thisstmt", ":", "nextstmt", "]", ")", "else", ":", "for", "line_number", ",", "line", "in", "enumerate", "(", "lines", "[", "thisstmt", ":", "nextstmt", "]", ",", "start", "=", "thisstmt", ")", ":", "if", "line_number", "in", "self", ".", "string_content_line_numbers", ":", "after", ".", "append", "(", "line", ")", "elif", "diff", ">", "0", ":", "if", "line", "==", "'\\n'", ":", "after", ".", "append", "(", "line", ")", "else", ":", "after", ".", "append", "(", "' '", "*", "diff", "+", "line", ")", "else", ":", "remove", "=", "min", "(", "_leading_space_count", "(", "line", ")", ",", "-", "diff", ")", "after", ".", "append", "(", "line", "[", "remove", ":", "]", ")", "return", "''", ".", "join", "(", "after", ")" ]
Fix indentation and return modified line numbers. Line numbers are indexed at 1.
[ "Fix", "indentation", "and", "return", "modified", "line", "numbers", "." ]
python
train
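The Reindenter above is normally driven through autopep8's public entry point rather than directly; a minimal check, assuming fix_code() applies the reindent pass as part of its default fixes.

import autopep8

bad = "def f():\n   return 1\n"  # three-space indent (PEP 8 E111)
print(autopep8.fix_code(bad))    # body comes back indented four spaces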
DarkEnergySurvey/ugali
ugali/analysis/mcmc.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L172-L192
def lnprob(self,theta): """ Logarithm of the probability """ global niter params,priors,loglike = self.params,self.priors,self.loglike # Avoid extra likelihood calls with bad priors _lnprior = self.lnprior(theta) if np.isfinite(_lnprior): _lnlike = self.lnlike(theta) else: _lnprior = -np.inf _lnlike = -np.inf _lnprob = _lnprior + _lnlike if (niter%100==0): msg = "%i function calls ...\n"%niter msg+= ', '.join('%s: %.3f'%(k,v) for k,v in zip(params,theta)) msg+= '\nlog(like): %.3f, log(prior): %.3f'%(_lnlike,_lnprior) logger.debug(msg) niter+=1 return _lnprob
[ "def", "lnprob", "(", "self", ",", "theta", ")", ":", "global", "niter", "params", ",", "priors", ",", "loglike", "=", "self", ".", "params", ",", "self", ".", "priors", ",", "self", ".", "loglike", "# Avoid extra likelihood calls with bad priors", "_lnprior", "=", "self", ".", "lnprior", "(", "theta", ")", "if", "np", ".", "isfinite", "(", "_lnprior", ")", ":", "_lnlike", "=", "self", ".", "lnlike", "(", "theta", ")", "else", ":", "_lnprior", "=", "-", "np", ".", "inf", "_lnlike", "=", "-", "np", ".", "inf", "_lnprob", "=", "_lnprior", "+", "_lnlike", "if", "(", "niter", "%", "100", "==", "0", ")", ":", "msg", "=", "\"%i function calls ...\\n\"", "%", "niter", "msg", "+=", "', '", ".", "join", "(", "'%s: %.3f'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "zip", "(", "params", ",", "theta", ")", ")", "msg", "+=", "'\\nlog(like): %.3f, log(prior): %.3f'", "%", "(", "_lnprior", ",", "_lnlike", ")", "logger", ".", "debug", "(", "msg", ")", "niter", "+=", "1", "return", "_lnprob" ]
Logarithm of the probability
[ "Logarithm", "of", "the", "probability" ]
python
train
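The guard pattern above (skip the likelihood whenever the prior is already -inf) in isolation, with a toy one-parameter prior and likelihood invented for the example:

import numpy as np

def lnprior(theta):
    # Flat prior on (0, 10); -inf outside the support.
    return 0.0 if 0.0 < theta[0] < 10.0 else -np.inf

def lnlike(theta):
    return -0.5 * (theta[0] - 5.0) ** 2  # toy Gaussian log-likelihood

def lnprob(theta):
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf  # avoid the (potentially expensive) likelihood call
    return lp + lnlike(theta)

print(lnprob(np.array([5.0])), lnprob(np.array([-1.0])))  # 0.0 -inf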
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2341-L2346
def _getTrailerString(self, compressed=1): """_getTrailerString(self, compressed=1) -> PyObject *""" if self.isClosed: raise ValueError("operation illegal for closed doc") return _fitz.Document__getTrailerString(self, compressed)
[ "def", "_getTrailerString", "(", "self", ",", "compressed", "=", "1", ")", ":", "if", "self", ".", "isClosed", ":", "raise", "ValueError", "(", "\"operation illegal for closed doc\"", ")", "return", "_fitz", ".", "Document__getTrailerString", "(", "self", ",", "compressed", ")" ]
_getTrailerString(self, compressed=1) -> PyObject *
[ "_getTrailerString", "(", "self", "compressed", "=", "1", ")", "-", ">", "PyObject", "*" ]
python
train
bitly/asyncmongo
asyncmongo/cursor.py
https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/cursor.py#L421-L430
def __query_options(self): """Get the query options bitmask to use for this query.""" options = 0 if self.__tailable: options |= _QUERY_OPTIONS["tailable_cursor"] if self.__slave_okay or self.__pool._slave_okay: options |= _QUERY_OPTIONS["slave_okay"] if not self.__timeout: options |= _QUERY_OPTIONS["no_timeout"] return options
[ "def", "__query_options", "(", "self", ")", ":", "options", "=", "0", "if", "self", ".", "__tailable", ":", "options", "|=", "_QUERY_OPTIONS", "[", "\"tailable_cursor\"", "]", "if", "self", ".", "__slave_okay", "or", "self", ".", "__pool", ".", "_slave_okay", ":", "options", "|=", "_QUERY_OPTIONS", "[", "\"slave_okay\"", "]", "if", "not", "self", ".", "__timeout", ":", "options", "|=", "_QUERY_OPTIONS", "[", "\"no_timeout\"", "]", "return", "options" ]
Get the query options bitmask to use for this query.
[ "Get", "the", "query", "options", "string", "to", "use", "for", "this", "query", "." ]
python
train
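A standalone sketch of the bitmask composition above; the flag values mirror the MongoDB OP_QUERY wire-protocol bits (tailable=2, slave_ok=4, no_timeout=16) and the function name is invented for the example.

_QUERY_OPTIONS = {"tailable_cursor": 2, "slave_okay": 4, "no_timeout": 16}

def query_options(tailable=False, slave_okay=False, timeout=True):
    # OR together one bit per enabled option.
    options = 0
    if tailable:
        options |= _QUERY_OPTIONS["tailable_cursor"]
    if slave_okay:
        options |= _QUERY_OPTIONS["slave_okay"]
    if not timeout:
        options |= _QUERY_OPTIONS["no_timeout"]
    return options

print(query_options(tailable=True, timeout=False))  # 18 == 2 | 16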
1flow/python-ftr
ftr/config.py
https://github.com/1flow/python-ftr/blob/90a2108c5ee005f1bf66dbe8cce68f2b7051b839/ftr/config.py#L492-L507
def load(self, host, exact_host_match=False): """ Load a config for a hostname or url. This method calls :func:`ftr_get_config` and :meth:`append` internally. Refer to their docs for details on parameters. """ # Can raise a SiteConfigNotFound, intentionally bubbled. config_string, host_string = ftr_get_config(host, exact_host_match) if config_string is None: LOGGER.error(u'Error while loading configuration.', extra={'siteconfig': host_string}) return self.append(ftr_string_to_instance(config_string))
[ "def", "load", "(", "self", ",", "host", ",", "exact_host_match", "=", "False", ")", ":", "# Can raise a SiteConfigNotFound, intentionally bubbled.", "config_string", ",", "host_string", "=", "ftr_get_config", "(", "host", ",", "exact_host_match", ")", "if", "config_string", "is", "None", ":", "LOGGER", ".", "error", "(", "u'Error while loading configuration.'", ",", "extra", "=", "{", "'siteconfig'", ":", "host_string", "}", ")", "return", "self", ".", "append", "(", "ftr_string_to_instance", "(", "config_string", ")", ")" ]
Load a config for a hostname or url. This method calls :func:`ftr_get_config` and :meth:`append` internally. Refer to their docs for details on parameters.
[ "Load", "a", "config", "for", "a", "hostname", "or", "url", "." ]
python
train
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L163-L176
def split_array_as_list(self, values): """Group values as a list of arrays, or a jagged-array Parameters ---------- values : ndarray, [keys, ...] Returns ------- list of length self.groups of ndarray, [key_count, ...] """ values = np.asarray(values) values = values[self.index.sorter] return np.split(values, self.index.slices[1:-1], axis=0)
[ "def", "split_array_as_list", "(", "self", ",", "values", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "values", "=", "values", "[", "self", ".", "index", ".", "sorter", "]", "return", "np", ".", "split", "(", "values", ",", "self", ".", "index", ".", "slices", "[", "1", ":", "-", "1", "]", ",", "axis", "=", "0", ")" ]
Group values as a list of arrays, or a jagged-array Parameters ---------- values : ndarray, [keys, ...] Returns ------- list of length self.groups of ndarray, [key_count, ...]
[ "Group", "values", "as", "a", "list", "of", "arrays", "or", "a", "jagged", "-", "array" ]
python
train
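A usage sketch, assuming the GroupBy object carrying split_array_as_list is obtained through the package's group_by entry point; keys and values are invented for the example.

import numpy as np
import numpy_indexed as npi

keys = np.array([1, 2, 1, 2, 2])
values = np.array([10, 20, 30, 40, 50])

# Groups come back in sorted key order: key 1 -> [10, 30], key 2 -> [20, 40, 50]
g = npi.group_by(keys)
print(g.split_array_as_list(values))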
allenai/allennlp
allennlp/tools/wikitables_evaluator.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/wikitables_evaluator.py#L301-L317
def check_denotation(target_values, predicted_values): """Return True if the predicted denotation is correct. Args: target_values (list[Value]) predicted_values (list[Value]) Returns: bool """ # Check size if len(target_values) != len(predicted_values): return False # Check items for target in target_values: if not any(target.match(pred) for pred in predicted_values): return False return True
[ "def", "check_denotation", "(", "target_values", ",", "predicted_values", ")", ":", "# Check size", "if", "len", "(", "target_values", ")", "!=", "len", "(", "predicted_values", ")", ":", "return", "False", "# Check items", "for", "target", "in", "target_values", ":", "if", "not", "any", "(", "target", ".", "match", "(", "pred", ")", "for", "pred", "in", "predicted_values", ")", ":", "return", "False", "return", "True" ]
Return True if the predicted denotation is correct. Args: target_values (list[Value]) predicted_values (list[Value]) Returns: bool
[ "Return", "True", "if", "the", "predicted", "denotation", "is", "correct", "." ]
python
train
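check_denotation only relies on values exposing a match() method; a toy stand-in class makes the contract concrete (StringValue is invented for the example and is not one of the evaluator's own Value types; check_denotation is used as defined in the record above):

class StringValue:
    def __init__(self, s):
        self.s = s

    def match(self, other):
        # Two values match when their underlying strings are equal.
        return self.s == other.s

target = [StringValue("2008"), StringValue("2012")]
print(check_denotation(target, [StringValue("2012"), StringValue("2008")]))  # True, order-insensitive
print(check_denotation(target, [StringValue("2008")]))                       # False, size mismatch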
saltstack/salt
salt/modules/openvswitch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L80-L96
def _stdout_list_split(retcode, stdout='', splitstring='\n'): ''' Evaluates Open vSwitch command's retcode value. Args: retcode: Value of retcode field from response, should be 0, 1 or 2. stdout: Value of stdout field from response. splitstring: String used to split the stdout; defaults to newline. Returns: List or False. ''' if retcode == 0: ret = stdout.split(splitstring) return ret else: return False
[ "def", "_stdout_list_split", "(", "retcode", ",", "stdout", "=", "''", ",", "splitstring", "=", "'\\n'", ")", ":", "if", "retcode", "==", "0", ":", "ret", "=", "stdout", ".", "split", "(", "splitstring", ")", "return", "ret", "else", ":", "return", "False" ]
Evaluates Open vSwitch command's retcode value. Args: retcode: Value of retcode field from response, should be 0, 1 or 2. stdout: Value of stdout field from response. splitstring: String used to split the stdout; defaults to newline. Returns: List or False.
[ "Evaulates", "Open", "vSwitch", "command", "s", "retcode", "value", "." ]
python
train
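The helper above in isolation (with _stdout_list_split as defined in the record): a retcode of 0 splits stdout on the separator, anything else yields False.

print(_stdout_list_split(0, 'br0\nbr1'))      # ['br0', 'br1']
print(_stdout_list_split(0, 'a,b', ','))      # ['a', 'b']
print(_stdout_list_split(1, 'error output'))  # False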
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L1823-L1856
def _handle_job_without_successors(self, job, irsb, insn_addrs): """ A block without successors should still be handled so it can be added to the function graph correctly. :param CFGJob job: The current job that does not have any successors. :param IRSB irsb: The related IRSB. :param insn_addrs: A list of instruction addresses of this IRSB. :return: None """ # it's not an empty block # handle all conditional exits ins_addr = job.addr for stmt_idx, stmt in enumerate(irsb.statements): if type(stmt) is pyvex.IRStmt.IMark: ins_addr = stmt.addr + stmt.delta elif type(stmt) is pyvex.IRStmt.Exit: successor_jumpkind = stmt.jk self._update_function_transition_graph( job.block_id, None, jumpkind=successor_jumpkind, ins_addr=ins_addr, stmt_idx=stmt_idx, ) # handle the default exit successor_jumpkind = irsb.jumpkind successor_last_ins_addr = insn_addrs[-1] self._update_function_transition_graph(job.block_id, None, jumpkind=successor_jumpkind, ins_addr=successor_last_ins_addr, stmt_idx=DEFAULT_STATEMENT, )
[ "def", "_handle_job_without_successors", "(", "self", ",", "job", ",", "irsb", ",", "insn_addrs", ")", ":", "# it's not an empty block", "# handle all conditional exits", "ins_addr", "=", "job", ".", "addr", "for", "stmt_idx", ",", "stmt", "in", "enumerate", "(", "irsb", ".", "statements", ")", ":", "if", "type", "(", "stmt", ")", "is", "pyvex", ".", "IRStmt", ".", "IMark", ":", "ins_addr", "=", "stmt", ".", "addr", "+", "stmt", ".", "delta", "elif", "type", "(", "stmt", ")", "is", "pyvex", ".", "IRStmt", ".", "Exit", ":", "successor_jumpkind", "=", "stmt", ".", "jk", "self", ".", "_update_function_transition_graph", "(", "job", ".", "block_id", ",", "None", ",", "jumpkind", "=", "successor_jumpkind", ",", "ins_addr", "=", "ins_addr", ",", "stmt_idx", "=", "stmt_idx", ",", ")", "# handle the default exit", "successor_jumpkind", "=", "irsb", ".", "jumpkind", "successor_last_ins_addr", "=", "insn_addrs", "[", "-", "1", "]", "self", ".", "_update_function_transition_graph", "(", "job", ".", "block_id", ",", "None", ",", "jumpkind", "=", "successor_jumpkind", ",", "ins_addr", "=", "successor_last_ins_addr", ",", "stmt_idx", "=", "DEFAULT_STATEMENT", ",", ")" ]
A block without successors should still be handled so it can be added to the function graph correctly. :param CFGJob job: The current job that does not have any successors. :param IRSB irsb: The related IRSB. :param insn_addrs: A list of instruction addresses of this IRSB. :return: None
[ "A", "block", "without", "successors", "should", "still", "be", "handled", "so", "it", "can", "be", "added", "to", "the", "function", "graph", "correctly", "." ]
python
train
KE-works/pykechain
pykechain/models/scope.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/scope.py#L249-L275
def _update_scope_project_team(self, select_action, user, user_type): """ Update the Project Team of the Scope. Updates include adding or removing managers or members. :param select_action: type of action to be applied :type select_action: basestring :param user: the username of the user to which the action applies :type user: basestring :param user_type: the type of the user (member or manager) :type user_type: basestring :raises APIError: When unable to update the scope project team. """ if isinstance(user, str): users = self._client._retrieve_users() manager_object = next((item for item in users['results'] if item["username"] == user), None) if manager_object: url = self._client._build_url('scope', scope_id=self.id) r = self._client._request('PUT', url, params={'select_action': select_action}, data={ 'user_id': manager_object['pk'] }) if r.status_code != requests.codes.ok: # pragma: no cover raise APIError("Could not {} {} in Scope".format(select_action.split('_')[0], user_type)) else: raise NotFoundError("User {} does not exist".format(user)) else: raise TypeError("User {} should be defined as a string".format(user))
[ "def", "_update_scope_project_team", "(", "self", ",", "select_action", ",", "user", ",", "user_type", ")", ":", "if", "isinstance", "(", "user", ",", "str", ")", ":", "users", "=", "self", ".", "_client", ".", "_retrieve_users", "(", ")", "manager_object", "=", "next", "(", "(", "item", "for", "item", "in", "users", "[", "'results'", "]", "if", "item", "[", "\"username\"", "]", "==", "user", ")", ",", "None", ")", "if", "manager_object", ":", "url", "=", "self", ".", "_client", ".", "_build_url", "(", "'scope'", ",", "scope_id", "=", "self", ".", "id", ")", "r", "=", "self", ".", "_client", ".", "_request", "(", "'PUT'", ",", "url", ",", "params", "=", "{", "'select_action'", ":", "select_action", "}", ",", "data", "=", "{", "'user_id'", ":", "manager_object", "[", "'pk'", "]", "}", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "# pragma: no cover", "raise", "APIError", "(", "\"Could not {} {} in Scope\"", ".", "format", "(", "select_action", ".", "split", "(", "'_'", ")", "[", "0", "]", ",", "user_type", ")", ")", "else", ":", "raise", "NotFoundError", "(", "\"User {} does not exist\"", ".", "format", "(", "user", ")", ")", "else", ":", "raise", "TypeError", "(", "\"User {} should be defined as a string\"", ".", "format", "(", "user", ")", ")" ]
Update the Project Team of the Scope. Updates include adding or removing managers or members. :param select_action: type of action to be applied :type select_action: basestring :param user: the username of the user to which the action applies :type user: basestring :param user_type: the type of the user (member or manager) :type user_type: basestring :raises APIError: When unable to update the scope project team.
[ "Update", "the", "Project", "Team", "of", "the", "Scope", ".", "Updates", "include", "addition", "or", "removing", "of", "managers", "or", "members", "." ]
python
train
marshmallow-code/apispec
src/apispec/core.py
https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/core.py#L279-L319
def path( self, path=None, operations=None, summary=None, description=None, **kwargs ): """Add a new path object to the spec. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#path-item-object :param str|None path: URL path component :param dict|None operations: describes the http methods and options for `path` :param str summary: short summary relevant to all operations in this path :param str description: long description relevant to all operations in this path :param dict kwargs: parameters used by any path helpers see :meth:`register_path_helper` """ operations = operations or OrderedDict() # Execute path helpers for plugin in self.plugins: try: ret = plugin.path_helper(path=path, operations=operations, **kwargs) except PluginMethodNotImplementedError: continue if ret is not None: path = ret if not path: raise APISpecError("Path template is not specified.") # Execute operation helpers for plugin in self.plugins: try: plugin.operation_helper(path=path, operations=operations, **kwargs) except PluginMethodNotImplementedError: continue clean_operations(operations, self.openapi_version.major) self._paths.setdefault(path, operations).update(operations) if summary is not None: self._paths[path]["summary"] = summary if description is not None: self._paths[path]["description"] = description return self
[ "def", "path", "(", "self", ",", "path", "=", "None", ",", "operations", "=", "None", ",", "summary", "=", "None", ",", "description", "=", "None", ",", "*", "*", "kwargs", ")", ":", "operations", "=", "operations", "or", "OrderedDict", "(", ")", "# Execute path helpers", "for", "plugin", "in", "self", ".", "plugins", ":", "try", ":", "ret", "=", "plugin", ".", "path_helper", "(", "path", "=", "path", ",", "operations", "=", "operations", ",", "*", "*", "kwargs", ")", "except", "PluginMethodNotImplementedError", ":", "continue", "if", "ret", "is", "not", "None", ":", "path", "=", "ret", "if", "not", "path", ":", "raise", "APISpecError", "(", "\"Path template is not specified.\"", ")", "# Execute operation helpers", "for", "plugin", "in", "self", ".", "plugins", ":", "try", ":", "plugin", ".", "operation_helper", "(", "path", "=", "path", ",", "operations", "=", "operations", ",", "*", "*", "kwargs", ")", "except", "PluginMethodNotImplementedError", ":", "continue", "clean_operations", "(", "operations", ",", "self", ".", "openapi_version", ".", "major", ")", "self", ".", "_paths", ".", "setdefault", "(", "path", ",", "operations", ")", ".", "update", "(", "operations", ")", "if", "summary", "is", "not", "None", ":", "self", ".", "_paths", "[", "path", "]", "[", "\"summary\"", "]", "=", "summary", "if", "description", "is", "not", "None", ":", "self", ".", "_paths", "[", "path", "]", "[", "\"description\"", "]", "=", "description", "return", "self" ]
Add a new path object to the spec. https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#path-item-object :param str|None path: URL path component :param dict|None operations: describes the http methods and options for `path` :param str summary: short summary relevant to all operations in this path :param str description: long description relevant to all operations in this path :param dict kwargs: parameters used by any path helpers see :meth:`register_path_helper`
[ "Add", "a", "new", "path", "object", "to", "the", "spec", "." ]
python
train
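A usage sketch for path() on a plain APISpec with no plugins; the endpoint and operation payload are invented for the example.

from apispec import APISpec

spec = APISpec(title="Pet API", version="1.0.0", openapi_version="3.0.2")
spec.path(
    path="/pets/{pet_id}",
    operations={"get": {"responses": {"200": {"description": "A pet"}}}},
    summary="Operations on a single pet",
)
print(spec.to_dict()["paths"]["/pets/{pet_id}"]["summary"])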
archman/beamline
beamline/pltutils.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/pltutils.py#L47-L116
def makeBeamline(beamlinelist, startpoint=(0, 0)): """ function to construct patches for ``plotLattice()``, from different elements like ``rbend``, ``quadrupole``, etc., parsed from a mad-8 lattice file. drift sections are calculated from other elements. Input parameters: :param beamlinelist: list whose elements are dicts; each dict describes a magnetic element, as returned by module ``blparser``, function ``madParser()`` :param startpoint: position to start drawing, ``(0, 0)`` by default :return: tuple of beamline patches list, xlim and ylim * beamline patches list: patches to be drawn * xlim: data limit along x-axis * ylim: data limit along y-axis """ latticelist = [] anglenow = 0.0 maxx, maxy = startpoint minx, miny = startpoint for i in range(len(beamlinelist)): element = beamlinelist[i] elementtype = element["type"][0:4].lower() if elementtype == "rben": newelement = elements.Rbend(width=float(element["l"]), height=1.5 * float(element["l"]), angle=float(element["angle"]), link_node=startpoint, ) anglenow += newelement.angle minx = min(minx, newelement.minx) miny = min(miny, newelement.miny) maxx = max(maxx, newelement.maxx) maxy = max(maxy, newelement.maxy) elif elementtype == "drif": newelement = elements.Drift(length=float(element["l"]), angle=anglenow, link_node=startpoint, ) minx = min(minx, newelement.minx) miny = min(miny, newelement.miny) maxx = max(maxx, newelement.maxx) maxy = max(maxy, newelement.maxy) elif elementtype == "quad": xory = "x" if float(element["k1"]) < 0: xory = "y" newelement = elements.Quadrupole(width=float(element["l"]), angle=float(element["angle"]), xysign=xory, link_node=startpoint, ) minx = min(minx, newelement.minx) miny = min(miny, newelement.miny) maxx = max(maxx, newelement.maxx) maxy = max(maxy, newelement.maxy) elif elementtype == "undu": newelement = elements.Undulator(period_length=float(element["xlamd"]), period_number=int(element["nwig"]), link_node=startpoint, ) minx = min(minx, newelement.minx) miny = min(miny, newelement.miny) maxx = max(maxx, newelement.maxx) maxy = max(maxy, newelement.maxy) else: print("unknown element\n") continue startpoint = newelement.link_node latticelist.append(newelement) return latticelist, np.array([minx, maxx]), np.array([miny, maxy])
[ "def", "makeBeamline", "(", "beamlinelist", ",", "startpoint", "=", "(", "0", ",", "0", ")", ")", ":", "latticelist", "=", "[", "]", "anglenow", "=", "0.0", "maxx", ",", "maxy", "=", "startpoint", "minx", ",", "miny", "=", "startpoint", "for", "i", "in", "range", "(", "len", "(", "beamlinelist", ")", ")", ":", "element", "=", "beamlinelist", "[", "i", "]", "elementtype", "=", "element", "[", "\"type\"", "]", "[", "0", ":", "4", "]", ".", "lower", "(", ")", "if", "elementtype", "==", "\"rben\"", ":", "newelement", "=", "elements", ".", "Rbend", "(", "width", "=", "float", "(", "element", "[", "\"l\"", "]", ")", ",", "height", "=", "1.5", "*", "float", "(", "element", "[", "\"l\"", "]", ")", ",", "angle", "=", "float", "(", "element", "[", "\"angle\"", "]", ")", ",", "link_node", "=", "startpoint", ",", ")", "anglenow", "+=", "newelement", ".", "angle", "minx", "=", "min", "(", "minx", ",", "newelement", ".", "minx", ")", "miny", "=", "min", "(", "miny", ",", "newelement", ".", "miny", ")", "maxx", "=", "max", "(", "maxx", ",", "newelement", ".", "maxx", ")", "maxy", "=", "max", "(", "maxy", ",", "newelement", ".", "maxy", ")", "elif", "elementtype", "==", "\"drif\"", ":", "newelement", "=", "elements", ".", "Drift", "(", "length", "=", "float", "(", "element", "[", "\"l\"", "]", ")", ",", "angle", "=", "anglenow", ",", "link_node", "=", "startpoint", ",", ")", "minx", "=", "min", "(", "minx", ",", "newelement", ".", "minx", ")", "miny", "=", "min", "(", "miny", ",", "newelement", ".", "miny", ")", "maxx", "=", "max", "(", "maxx", ",", "newelement", ".", "maxx", ")", "maxy", "=", "max", "(", "maxy", ",", "newelement", ".", "maxy", ")", "elif", "elementtype", "==", "\"quad\"", ":", "xory", "=", "\"x\"", "if", "float", "(", "element", "[", "\"k1\"", "]", ")", "<", "0", ":", "xory", "=", "\"y\"", "newelement", "=", "elements", ".", "Quadrupole", "(", "width", "=", "float", "(", "element", "[", "\"l\"", "]", ")", ",", "angle", "=", "float", "(", "element", "[", "\"angle\"", "]", ")", ",", "xysign", "=", "xory", ",", "link_node", "=", "startpoint", ",", ")", "minx", "=", "min", "(", "minx", ",", "newelement", ".", "minx", ")", "miny", "=", "min", "(", "miny", ",", "newelement", ".", "miny", ")", "maxx", "=", "max", "(", "maxx", ",", "newelement", ".", "maxx", ")", "maxy", "=", "max", "(", "maxy", ",", "newelement", ".", "maxy", ")", "elif", "elementtype", "==", "\"undu\"", ":", "newelement", "=", "elements", ".", "Undulator", "(", "period_length", "=", "float", "(", "element", "[", "\"xlamd\"", "]", ")", ",", "period_number", "=", "int", "(", "element", "[", "\"nwig\"", "]", ")", ",", "link_node", "=", "startpoint", ",", ")", "minx", "=", "min", "(", "minx", ",", "newelement", ".", "minx", ")", "miny", "=", "min", "(", "miny", ",", "newelement", ".", "miny", ")", "maxx", "=", "max", "(", "maxx", ",", "newelement", ".", "maxx", ")", "maxy", "=", "max", "(", "maxy", ",", "newelement", ".", "maxy", ")", "else", ":", "print", "(", "\"unknown element\\n\"", ")", "startpoint", "=", "newelement", ".", "link_node", "latticelist", ".", "append", "(", "newelement", ")", "return", "latticelist", ",", "np", ".", "array", "(", "[", "minx", ",", "maxx", "]", ")", ",", "np", ".", "array", "(", "[", "miny", ",", "maxy", "]", ")" ]
function to construct patches for ``plotLattice()``, from different elements like ``rbend``, ``quadrupole``, etc., parsed from a mad-8 lattice file. drift sections are calculated from other elements. Input parameters: :param beamlinelist: list whose elements are dicts; each dict describes a magnetic element, as returned by module ``blparser``, function ``madParser()`` :param startpoint: position to start drawing, ``(0, 0)`` by default :return: tuple of beamline patches list, xlim and ylim * beamline patches list: patches to be drawn * xlim: data limit along x-axis * ylim: data limit along y-axis
[ "function", "to", "construct", "patches", "for", "plotLattice", "()", "from", "different", "elements", "like", "rbend", "quadrupole", "etc", ".", "parsing", "from", "lattice", "file", "mad", "-", "8", ".", "drift", "sections", "are", "calculated", "from", "other", "elements", "." ]
python
train
MatterMiners/cobald
cobald/controller/stepwise.py
https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/controller/stepwise.py#L135-L165
def add(self, rule: ControlRule = None, *, supply: float): """ Register a new rule above a given ``supply`` threshold Registration supports a single-argument form for use as a decorator, as well as a two-argument form for direct application. Use the former for ``def`` or ``class`` definitions, and the latter for ``lambda`` functions and existing callables. .. code:: python @control.add(supply=10) def linear(pool, interval): if pool.utilisation < 0.75: return pool.supply - interval elif pool.allocation > 0.95: return pool.supply + interval control.add( lambda pool, interval: pool.supply * (1.2 if pool.allocation > 0.75 else 0.9), supply=100 ) """ if supply in self._thresholds: raise ValueError('rule for threshold %s re-defined' % supply) if rule is not None: self.rules.append((supply, rule)) self._thresholds.add(supply) return rule else: return partial(self.add, supply=supply)
[ "def", "add", "(", "self", ",", "rule", ":", "ControlRule", "=", "None", ",", "*", ",", "supply", ":", "float", ")", ":", "if", "supply", "in", "self", ".", "_thresholds", ":", "raise", "ValueError", "(", "'rule for threshold %s re-defined'", "%", "supply", ")", "if", "rule", "is", "not", "None", ":", "self", ".", "rules", ".", "append", "(", "(", "supply", ",", "rule", ")", ")", "self", ".", "_thresholds", ".", "add", "(", "supply", ")", "return", "rule", "else", ":", "return", "partial", "(", "self", ".", "add", ",", "supply", "=", "supply", ")" ]
Register a new rule above a given ``supply`` threshold Registration supports a single-argument form for use as a decorator, as well as a two-argument form for direct application. Use the former for ``def`` or ``class`` definitions, and the latter for ``lambda`` functions and existing callables. .. code:: python @control.add(supply=10) def linear(pool, interval): if pool.utilisation < 0.75: return pool.supply - interval elif pool.allocation > 0.95: return pool.supply + interval control.add( lambda pool, interval: pool.supply * (1.2 if pool.allocation > 0.75 else 0.9), supply=100 )
[ "Register", "a", "new", "rule", "above", "a", "given", "supply", "threshold" ]
python
train
mcs07/ChemDataExtractor
chemdataextractor/nlp/pos.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/pos.py#L82-L195
def _get_features(self, i, context, prev, prev2): """Map tokens into a feature representation.""" w = self.lexicon[context[i]] features = [ 'bias', 'w:shape=%s' % w.shape, 'w:lower=%s' % w.lower, 'p1:tag=%s' % prev, 'p2:tag=%s' % prev2, 'p1:tag+w:lower=%s+%s' % (prev, w.lower), 'p1:tag+p2:tag=%s+%s' % (prev, prev2), ] if w.like_number: features.append('w:like_number') elif w.is_punct: features.append('w:is_punct') elif w.like_url: features.append('w:like_url') else: features.extend([ 'w:suffix2=%s' % w.lower[-2:], 'w:suffix3=%s' % w.lower[-3:], 'w:suffix4=%s' % w.lower[-4:], 'w:suffix5=%s' % w.lower[-5:], 'w:prefix1=%s' % w.lower[:1], 'w:prefix2=%s' % w.lower[:2], 'w:prefix3=%s' % w.lower[:3], ]) if w.is_alpha: features.append('w:is_alpha') elif w.is_hyphenated: features.append('w:is_hyphenated') if w.is_upper: features.append('w:is_upper') elif w.is_lower: features.append('w:is_lower') elif w.is_title: features.append('w:is_title') if self.clusters and w.cluster: features.extend([ 'w:cluster4=%s' % w.cluster[:4], 'w:cluster6=%s' % w.cluster[:6], 'w:cluster10=%s' % w.cluster[:10], 'w:cluster20=%s' % w.cluster[:20], ]) # Add features for previous tokens if present if i > 0: p1 = self.lexicon[context[i-1]] features.extend([ 'p1:lower=%s' % p1.lower, 'p1:shape=%s' % p1.shape, ]) if not (p1.like_number or p1.is_punct or p1.like_url): features.append('p1:suffix3=%s' % p1.lower[-3:]) if self.clusters and p1.cluster: features.extend([ 'p1:cluster4=%s' % p1.cluster[:4], 'p1:cluster6=%s' % p1.cluster[:6], 'p1:cluster10=%s' % p1.cluster[:10], 'p1:cluster20=%s' % p1.cluster[:20], ]) if i > 1: p2 = self.lexicon[context[i-2]] features.extend([ 'p2:lower=%s' % p2.lower, 'p2:shape=%s' % p2.shape, ]) if self.clusters and p2.cluster: features.extend([ 'p2:cluster4=%s' % p2.cluster[:4], 'p2:cluster6=%s' % p2.cluster[:6], 'p2:cluster10=%s' % p2.cluster[:10], 'p2:cluster20=%s' % p2.cluster[:20], ]) # Add features for next tokens if present end = len(context) - 1 if i < end: n1 = self.lexicon[context[i+1]] features.extend([ 'n1:lower=%s' % n1.lower, 'n1:shape=%s' % n1.shape ]) if not (n1.like_number or n1.is_punct or n1.like_url): features.append('n1:suffix3=%s' % n1.lower[-3:]) if self.clusters and n1.cluster: features.extend([ 'n1:cluster4=%s' % n1.cluster[:4], 'n1:cluster6=%s' % n1.cluster[:6], 'n1:cluster10=%s' % n1.cluster[:10], 'n1:cluster20=%s' % n1.cluster[:20], ]) if i < end - 1: n2 = self.lexicon[context[i+2]] features.extend([ 'n2:lower=%s' % n2.lower, 'n2:shape=%s' % n2.shape ]) if self.clusters and n2.cluster: features.extend([ 'n2:cluster4=%s' % n2.cluster[:4], 'n2:cluster6=%s' % n2.cluster[:6], 'n2:cluster10=%s' % n2.cluster[:10], 'n2:cluster20=%s' % n2.cluster[:20], ]) # Add position features if i == 0: features.append('-firsttoken-') elif i == 1: features.append('-secondtoken-') elif i == end - 1: features.append('-secondlasttoken-') elif i == end: features.append('-lasttoken-') return features
[ "def", "_get_features", "(", "self", ",", "i", ",", "context", ",", "prev", ",", "prev2", ")", ":", "w", "=", "self", ".", "lexicon", "[", "context", "[", "i", "]", "]", "features", "=", "[", "'bias'", ",", "'w:shape=%s'", "%", "w", ".", "shape", ",", "'w:lower=%s'", "%", "w", ".", "lower", ",", "'p1:tag=%s'", "%", "prev", ",", "'p2:tag=%s'", "%", "prev2", ",", "'p1:tag+w:lower=%s+%s'", "%", "(", "prev", ",", "w", ".", "lower", ")", ",", "'p1:tag+p2:tag=%s+%s'", "%", "(", "prev", ",", "prev2", ")", ",", "]", "if", "w", ".", "like_number", ":", "features", ".", "append", "(", "'w:like_number'", ")", "elif", "w", ".", "is_punct", ":", "features", ".", "append", "(", "'w:is_punct'", ")", "elif", "w", ".", "like_url", ":", "features", ".", "append", "(", "'w:like_url'", ")", "else", ":", "features", ".", "extend", "(", "[", "'w:suffix2=%s'", "%", "w", ".", "lower", "[", "-", "2", ":", "]", ",", "'w:suffix3=%s'", "%", "w", ".", "lower", "[", "-", "3", ":", "]", ",", "'w:suffix4=%s'", "%", "w", ".", "lower", "[", "-", "4", ":", "]", ",", "'w:suffix5=%s'", "%", "w", ".", "lower", "[", "-", "5", ":", "]", ",", "'w:prefix1=%s'", "%", "w", ".", "lower", "[", ":", "1", "]", ",", "'w:prefix2=%s'", "%", "w", ".", "lower", "[", ":", "2", "]", ",", "'w:prefix3=%s'", "%", "w", ".", "lower", "[", ":", "3", "]", ",", "]", ")", "if", "w", ".", "is_alpha", ":", "features", ".", "append", "(", "'w:is_alpha'", ")", "elif", "w", ".", "is_hyphenated", ":", "features", ".", "append", "(", "'w:is_hyphenated'", ")", "if", "w", ".", "is_upper", ":", "features", ".", "append", "(", "'w:is_upper'", ")", "elif", "w", ".", "is_lower", ":", "features", ".", "append", "(", "'w:is_lower'", ")", "elif", "w", ".", "is_title", ":", "features", ".", "append", "(", "'w:is_title'", ")", "if", "self", ".", "clusters", "and", "w", ".", "cluster", ":", "features", ".", "extend", "(", "[", "'w:cluster4=%s'", "%", "w", ".", "cluster", "[", ":", "4", "]", ",", "'w:cluster6=%s'", "%", "w", ".", "cluster", "[", ":", "6", "]", ",", "'w:cluster10=%s'", "%", "w", ".", "cluster", "[", ":", "10", "]", ",", "'w:cluster20=%s'", "%", "w", ".", "cluster", "[", ":", "20", "]", ",", "]", ")", "# Add features for previous tokens if present", "if", "i", ">", "0", ":", "p1", "=", "self", ".", "lexicon", "[", "context", "[", "i", "-", "1", "]", "]", "features", ".", "extend", "(", "[", "'p1:lower=%s'", "%", "p1", ".", "lower", ",", "'p1:shape=%s'", "%", "p1", ".", "shape", ",", "]", ")", "if", "not", "(", "p1", ".", "like_number", "or", "p1", ".", "is_punct", "or", "p1", ".", "like_url", ")", ":", "features", ".", "append", "(", "'p1:suffix3=%s'", "%", "p1", ".", "lower", "[", "-", "3", ":", "]", ")", "if", "self", ".", "clusters", "and", "p1", ".", "cluster", ":", "features", ".", "extend", "(", "[", "'p1:cluster4=%s'", "%", "p1", ".", "cluster", "[", ":", "4", "]", ",", "'p1:cluster6=%s'", "%", "p1", ".", "cluster", "[", ":", "6", "]", ",", "'p1:cluster10=%s'", "%", "p1", ".", "cluster", "[", ":", "10", "]", ",", "'p1:cluster20=%s'", "%", "p1", ".", "cluster", "[", ":", "20", "]", ",", "]", ")", "if", "i", ">", "1", ":", "p2", "=", "self", ".", "lexicon", "[", "context", "[", "i", "-", "2", "]", "]", "features", ".", "extend", "(", "[", "'p2:lower=%s'", "%", "p2", ".", "lower", ",", "'p2:shape=%s'", "%", "p2", ".", "shape", ",", "]", ")", "if", "self", ".", "clusters", "and", "p2", ".", "cluster", ":", "features", ".", "extend", "(", "[", "'p2:cluster4=%s'", "%", "p2", ".", "cluster", "[", ":", "4", "]", ",", "'p2:cluster6=%s'", "%", "p2", ".", 
"cluster", "[", ":", "6", "]", ",", "'p2:cluster10=%s'", "%", "p2", ".", "cluster", "[", ":", "10", "]", ",", "'p2:cluster20=%s'", "%", "p2", ".", "cluster", "[", ":", "20", "]", ",", "]", ")", "# Add features for next tokens if present", "end", "=", "len", "(", "context", ")", "-", "1", "if", "i", "<", "end", ":", "n1", "=", "self", ".", "lexicon", "[", "context", "[", "i", "+", "1", "]", "]", "features", ".", "extend", "(", "[", "'n1:lower=%s'", "%", "n1", ".", "lower", ",", "'n1:shape=%s'", "%", "n1", ".", "shape", "]", ")", "if", "not", "(", "n1", ".", "like_number", "or", "n1", ".", "is_punct", "or", "n1", ".", "like_url", ")", ":", "features", ".", "append", "(", "'n1:suffix3=%s'", "%", "n1", ".", "lower", "[", "-", "3", ":", "]", ")", "if", "self", ".", "clusters", "and", "n1", ".", "cluster", ":", "features", ".", "extend", "(", "[", "'n1:cluster4=%s'", "%", "n1", ".", "cluster", "[", ":", "4", "]", ",", "'n1:cluster6=%s'", "%", "n1", ".", "cluster", "[", ":", "6", "]", ",", "'n1:cluster10=%s'", "%", "n1", ".", "cluster", "[", ":", "10", "]", ",", "'n1:cluster20=%s'", "%", "n1", ".", "cluster", "[", ":", "20", "]", ",", "]", ")", "if", "i", "<", "end", "-", "1", ":", "n2", "=", "self", ".", "lexicon", "[", "context", "[", "i", "+", "2", "]", "]", "features", ".", "extend", "(", "[", "'n2:lower=%s'", "%", "n2", ".", "lower", ",", "'n2:shape=%s'", "%", "n2", ".", "shape", "]", ")", "if", "self", ".", "clusters", "and", "n2", ".", "cluster", ":", "features", ".", "extend", "(", "[", "'n2:cluster4=%s'", "%", "n2", ".", "cluster", "[", ":", "4", "]", ",", "'n2:cluster6=%s'", "%", "n2", ".", "cluster", "[", ":", "6", "]", ",", "'n2:cluster10=%s'", "%", "n2", ".", "cluster", "[", ":", "10", "]", ",", "'n2:cluster20=%s'", "%", "n2", ".", "cluster", "[", ":", "20", "]", ",", "]", ")", "# Add position features", "if", "i", "==", "0", ":", "features", ".", "append", "(", "'-firsttoken-'", ")", "elif", "i", "==", "1", ":", "features", ".", "append", "(", "'-secondtoken-'", ")", "elif", "i", "==", "end", "-", "1", ":", "features", ".", "append", "(", "'-secondlasttoken-'", ")", "elif", "i", "==", "end", ":", "features", ".", "append", "(", "'-lasttoken-'", ")", "return", "features" ]
Map tokens into a feature representation.
[ "Map", "tokens", "into", "a", "feature", "representation", "." ]
python
train
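These templates emit plain feature strings. As a minimal sketch of how such sparse string features typically feed a perceptron-style tagger (the score helper and the weight layout below are illustrative assumptions, not this library's API):

from collections import defaultdict

# Hypothetical scoring step: each feature string indexes a per-tag weight,
# and the predicted tag is the one with the highest summed weight.
def score(features, weights, tags):
    totals = dict.fromkeys(tags, 0.0)
    for feat in features:
        for tag, w in weights.get(feat, {}).items():
            totals[tag] += w
    return max(totals, key=totals.get)

weights = defaultdict(dict)
weights['w:lower=acid']['NN'] = 1.5
weights['p1:tag=JJ']['NN'] = 0.8
print(score(['bias', 'w:lower=acid', 'p1:tag=JJ'], weights, ['NN', 'VB']))  # -> 'NN'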
visualfabriq/bquery
bquery/ctable.py
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L76-L93
def group_cache_valid(self, col_list):
    """
    Checks whether the column has a factorization that exists and is not older than the source
    :param col_list:
    :return:
    """
    cache_valid = False

    if self.rootdir:
        col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
                                '.values/__attrs__'
        exists_group_index = os.path.exists(col_values_file_check)
        missing_col_check = [1 for col in col_list if
                             not os.path.exists(self[col].rootdir + '/__attrs__')]
        cache_valid = (exists_group_index and not missing_col_check)

    return cache_valid
[ "def", "group_cache_valid", "(", "self", ",", "col_list", ")", ":", "cache_valid", "=", "False", "if", "self", ".", "rootdir", ":", "col_values_file_check", "=", "os", ".", "path", ".", "join", "(", "self", ".", "rootdir", ",", "self", ".", "create_group_base_name", "(", "col_list", ")", ")", "+", "'.values/__attrs__'", "exists_group_index", "=", "os", ".", "path", ".", "exists", "(", "col_values_file_check", ")", "missing_col_check", "=", "[", "1", "for", "col", "in", "col_list", "if", "not", "os", ".", "path", ".", "exists", "(", "self", "[", "col", "]", ".", "rootdir", "+", "'/__attrs__'", ")", "]", "cache_valid", "=", "(", "exists_group_index", "and", "not", "missing_col_check", ")", "return", "cache_valid" ]
Checks whether the column has a factorization that exists and is not older than the source
:param col_list:
:return:
[ "Checks", "whether", "the", "column", "has", "a", "factorization", "that", "exists", "and", "is", "not", "older", "than", "the", "source" ]
python
train
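The validity test reduces to a pair of path-existence checks. A standalone sketch of the same rule, with hypothetical directories standing in for the on-disk ctable layout:

import os

def cache_valid(rootdir, group_base_name, col_rootdirs):
    # The factorization cache lives under '<group>.values/__attrs__';
    # every source column must still carry its own '__attrs__' marker.
    group_attrs = os.path.join(rootdir, group_base_name + '.values', '__attrs__')
    cols_present = all(os.path.exists(os.path.join(d, '__attrs__')) for d in col_rootdirs)
    return os.path.exists(group_attrs) and cols_present

print(cache_valid('/tmp/ct', 'f0_f1', ['/tmp/ct/f0', '/tmp/ct/f1']))  # False unless the cache exists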
jasonbot/arcrest
arcrest/geometry.py
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/geometry.py#L391-L423
def contains(self, pt):
    "Tests if the provided point is in the polygon."
    if isinstance(pt, Point):
        ptx, pty = pt.x, pt.y
        assert (self.spatialReference is None or
                self.spatialReference.wkid is None) or \
               (pt.spatialReference is None or
                pt.spatialReference.wkid is None) or \
               self.spatialReference == pt.spatialReference, \
               "Spatial references do not match."
    else:
        ptx, pty = pt
    in_shape = False
    # Ported nearly line-for-line from the Javascript API
    for ring in self._json_rings:
        for idx in range(len(ring)):
            idxp1 = idx + 1
            if idxp1 >= len(ring):
                idxp1 -= len(ring)
            pi, pj = ring[idx], ring[idxp1]
            # Divide-by-zero checks
            if (pi[1] == pj[1]) and pty >= min((pi[1], pj[1])):
                if ptx >= max((pi[0], pj[0])):
                    in_shape = not in_shape
            elif (pi[0] == pj[0]) and pty >= min((pi[0], pj[0])):
                if ptx >= max((pi[1], pj[1])):
                    in_shape = not in_shape
            elif (((pi[1] < pty and pj[1] >= pty) or
                   (pj[1] < pty and pi[1] >= pty)) and
                  (pi[0] + (pty - pi[1]) / (pj[1] - pi[1]) * (pj[0] - pi[0]) < ptx)):
                in_shape = not in_shape
    return in_shape
[ "def", "contains", "(", "self", ",", "pt", ")", ":", "if", "isinstance", "(", "pt", ",", "Point", ")", ":", "ptx", ",", "pty", "=", "pt", ".", "x", ",", "pt", ".", "y", "assert", "(", "self", ".", "spatialReference", "is", "None", "or", "self", ".", "spatialReference", ".", "wkid", "is", "None", ")", "or", "(", "pt", ".", "spatialReference", "is", "None", "or", "pt", ".", "spatialReference", ".", "wkid", "is", "None", ")", "or", "self", ".", "spatialReference", "==", "pt", ".", "spatialReference", ",", "\"Spatial references do not match.\"", "else", ":", "ptx", ",", "pty", "=", "pt", "in_shape", "=", "False", "# Ported nearly line-for-line from the Javascript API", "for", "ring", "in", "self", ".", "_json_rings", ":", "for", "idx", "in", "range", "(", "len", "(", "ring", ")", ")", ":", "idxp1", "=", "idx", "+", "1", "if", "idxp1", ">=", "len", "(", "ring", ")", ":", "idxp1", "-=", "len", "(", "ring", ")", "pi", ",", "pj", "=", "ring", "[", "idx", "]", ",", "ring", "[", "idxp1", "]", "# Divide-by-zero checks", "if", "(", "pi", "[", "1", "]", "==", "pj", "[", "1", "]", ")", "and", "pty", ">=", "min", "(", "(", "pi", "[", "1", "]", ",", "pj", "[", "1", "]", ")", ")", ":", "if", "ptx", ">=", "max", "(", "(", "pi", "[", "0", "]", ",", "pj", "[", "0", "]", ")", ")", ":", "in_shape", "=", "not", "in_shape", "elif", "(", "pi", "[", "0", "]", "==", "pj", "[", "0", "]", ")", "and", "pty", ">=", "min", "(", "(", "pi", "[", "0", "]", ",", "pj", "[", "0", "]", ")", ")", ":", "if", "ptx", ">=", "max", "(", "(", "pi", "[", "1", "]", ",", "pj", "[", "1", "]", ")", ")", ":", "in_shape", "=", "not", "in_shape", "elif", "(", "(", "(", "pi", "[", "1", "]", "<", "pty", "and", "pj", "[", "1", "]", ">=", "pty", ")", "or", "(", "pj", "[", "1", "]", "<", "pty", "and", "pi", "[", "1", "]", ">=", "pty", ")", ")", "and", "(", "pi", "[", "0", "]", "+", "(", "pty", "-", "pi", "[", "1", "]", ")", "/", "(", "pj", "[", "1", "]", "-", "pi", "[", "1", "]", ")", "*", "(", "pj", "[", "0", "]", "-", "pi", "[", "0", "]", ")", "<", "ptx", ")", ")", ":", "in_shape", "=", "not", "in_shape", "return", "in_shape" ]
Tests if the provided point is in the polygon.
[ "Tests", "if", "the", "provided", "point", "is", "in", "the", "polygon", "." ]
python
train
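The loop above is an even-odd (ray casting) test with extra guards for horizontal and vertical edges. For comparison, a textbook single-ring version of the same idea looks like the sketch below; point_in_ring is my reduction, not the arcrest API, and it ignores spatial references and multi-ring polygons:

def point_in_ring(x, y, ring):
    # Count edges crossing the horizontal line through (x, y) strictly
    # to the left of the point; an odd count means the point is inside.
    inside = False
    n = len(ring)
    for i in range(n):
        x1, y1 = ring[i]
        x2, y2 = ring[(i + 1) % n]
        if (y1 <= y) != (y2 <= y):  # the edge spans the ray's y level
            x_cross = x1 + (y - y1) / (y2 - y1) * (x2 - x1)
            if x_cross < x:
                inside = not inside
    return inside

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
print(point_in_ring(0.5, 0.5, square))  # True
print(point_in_ring(1.5, 0.5, square))  # False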
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/GettextCommon.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/GettextCommon.py#L114-L116
def Entry(self, name, directory=None, create=1):
    """ Create `SCons.Node.FS.Entry` """
    return self._create_node(name, self.env.fs.Entry, directory, create)
[ "def", "Entry", "(", "self", ",", "name", ",", "directory", "=", "None", ",", "create", "=", "1", ")", ":", "return", "self", ".", "_create_node", "(", "name", ",", "self", ".", "env", ".", "fs", ".", "Entry", ",", "directory", ",", "create", ")" ]
Create `SCons.Node.FS.Entry`
[ "Create", "SCons", ".", "Node", ".", "FS", ".", "Entry" ]
python
train
timothyb0912/pylogit
pylogit/bootstrap_mle.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_mle.py#L103-L133
def get_model_creation_kwargs(model_obj):
    """
    Get a dictionary of the keyword arguments needed to create the
    passed model object using `pylogit.create_choice_model`.

    Parameters
    ----------
    model_obj : An MNDC_Model instance.

    Returns
    -------
    model_kwargs : dict.
        Contains the keyword arguments and the required values that are
        needed to initialize a replica of `model_obj`.
    """
    # Extract the model abbreviation for this model
    model_abbrev = get_model_abbrev(model_obj)

    # Create a dictionary to store the keyword arguments needed to initialize
    # the new model object.
    model_kwargs = {"model_type": model_abbrev,
                    "names": model_obj.name_spec,
                    "intercept_names": model_obj.intercept_names,
                    "intercept_ref_pos": model_obj.intercept_ref_position,
                    "shape_names": model_obj.shape_names,
                    "shape_ref_pos": model_obj.shape_ref_position,
                    "nest_spec": model_obj.nest_spec,
                    "mixing_vars": model_obj.mixing_vars,
                    "mixing_id_col": model_obj.mixing_id_col}

    return model_kwargs
[ "def", "get_model_creation_kwargs", "(", "model_obj", ")", ":", "# Extract the model abbreviation for this model", "model_abbrev", "=", "get_model_abbrev", "(", "model_obj", ")", "# Create a dictionary to store the keyword arguments needed to Initialize", "# the new model object.d", "model_kwargs", "=", "{", "\"model_type\"", ":", "model_abbrev", ",", "\"names\"", ":", "model_obj", ".", "name_spec", ",", "\"intercept_names\"", ":", "model_obj", ".", "intercept_names", ",", "\"intercept_ref_pos\"", ":", "model_obj", ".", "intercept_ref_position", ",", "\"shape_names\"", ":", "model_obj", ".", "shape_names", ",", "\"shape_ref_pos\"", ":", "model_obj", ".", "shape_ref_position", ",", "\"nest_spec\"", ":", "model_obj", ".", "nest_spec", ",", "\"mixing_vars\"", ":", "model_obj", ".", "mixing_vars", ",", "\"mixing_id_col\"", ":", "model_obj", ".", "mixing_id_col", "}", "return", "model_kwargs" ]
Get a dictionary of the keyword arguments needed to create the
passed model object using `pylogit.create_choice_model`.

Parameters
----------
model_obj : An MNDC_Model instance.

Returns
-------
model_kwargs : dict.
    Contains the keyword arguments and the required values that are
    needed to initialize a replica of `model_obj`.
[ "Get", "a", "dictionary", "of", "the", "keyword", "arguments", "needed", "to", "create", "the", "passed", "model", "object", "using", "pylogit", ".", "create_choice_model", "." ]
python
train
dddomodossola/remi
editor/editor.py
https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/editor/editor.py#L656-L680
def configure_widget_for_editing(self, widget):
    """ A widget has to be added to the editor; it is configured here
        in order to conform to the editor
    """
    if not 'editor_varname' in widget.attributes:
        return
    widget.onclick.do(self.on_widget_selection)

    # setup of the on_dropped function of the widget in order to manage the dragNdrop
    widget.__class__.on_dropped = on_dropped

    # drag properties
    # widget.style['resize'] = 'both'
    widget.style['overflow'] = 'auto'
    widget.attributes['draggable'] = 'true'
    widget.attributes['tabindex'] = str(self.tabindex)
    # if not 'position' in widget.style.keys():
    #     widget.style['position'] = 'absolute'
    # if not 'left' in widget.style.keys():
    #     widget.style['left'] = '1px'
    # if not 'top' in widget.style.keys():
    #     widget.style['top'] = '1px'
    self.tabindex += 1
[ "def", "configure_widget_for_editing", "(", "self", ",", "widget", ")", ":", "if", "not", "'editor_varname'", "in", "widget", ".", "attributes", ":", "return", "widget", ".", "onclick", ".", "do", "(", "self", ".", "on_widget_selection", ")", "#setup of the on_dropped function of the widget in order to manage the dragNdrop ", "widget", ".", "__class__", ".", "on_dropped", "=", "on_dropped", "#drag properties", "#widget.style['resize'] = 'both'", "widget", ".", "style", "[", "'overflow'", "]", "=", "'auto'", "widget", ".", "attributes", "[", "'draggable'", "]", "=", "'true'", "widget", ".", "attributes", "[", "'tabindex'", "]", "=", "str", "(", "self", ".", "tabindex", ")", "#if not 'position' in widget.style.keys():", "# widget.style['position'] = 'absolute'", "#if not 'left' in widget.style.keys():", "# widget.style['left'] = '1px'", "#if not 'top' in widget.style.keys():", "# widget.style['top'] = '1px'", "self", ".", "tabindex", "+=", "1" ]
A widget has to be added to the editor; it is configured here in order to conform to the editor
[ "A", "widget", "have", "to", "be", "added", "to", "the", "editor", "it", "is", "configured", "here", "in", "order", "to", "be", "conformant", "to", "the", "editor" ]
python
train
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L78-L87
def setEncoder(self, encoder):
    """
    Sets the client's encoder
    ``encoder`` should be an instance of a ``json.JSONEncoder`` class
    """
    if not encoder:
        self._encoder = json.JSONEncoder()
    else:
        self._encoder = encoder
    self._encode = self._encoder.encode
[ "def", "setEncoder", "(", "self", ",", "encoder", ")", ":", "if", "not", "encoder", ":", "self", ".", "_encoder", "=", "json", ".", "JSONEncoder", "(", ")", "else", ":", "self", ".", "_encoder", "=", "encoder", "self", ".", "_encode", "=", "self", ".", "_encoder", ".", "encode" ]
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
[ "Sets", "the", "client", "s", "encoder", "encoder", "should", "be", "an", "instance", "of", "a", "json", ".", "JSONEncoder", "class" ]
python
train
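Any json.JSONEncoder subclass instance satisfies the contract in the docstring. A small example of an encoder one might pass in (the set-to-list rule here is only an illustration):

import json

class SetEncoder(json.JSONEncoder):
    # Serialize sets as sorted lists so they survive the JSON round trip.
    def default(self, obj):
        if isinstance(obj, set):
            return sorted(obj)
        return super(SetEncoder, self).default(obj)

enc = SetEncoder()
print(enc.encode({'tags': {'b', 'a'}}))  # {"tags": ["a", "b"]}

An instance like SetEncoder() is what setEncoder expects; passing a falsy value falls back to the default json.JSONEncoder().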
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L825-L895
def generate_all_paired_mutations_for_position(self, chain_ids, chain_sequence_mappings={},
                                               residue_ids_to_ignore=[], typed_residue_ids_to_ignore={},
                                               silent=True):
    '''Generates a set of mutations for the chains in chain_ids where each set corresponds to the "same" residue
       (see below) in both chains and where the wildtype residues match.
       e.g. if chain A and B both have K19 then the set of mutations K19A, ... K19I, K19L, K19Y will be included
       in the returned results unless 19 is in residue_ids_to_ignore or typed_residue_ids_to_ignore.

       residue_ids_to_ignore should be a list/set of residue IDs.
       typed_residue_ids_to_ignore should be a dict residue ID -> residue AA. It is used similarly to
       residue_ids_to_ignore but we also assert that the residue types match the sequences in the chains.

       By default, "same residue" is inferred by residue ID i.e. the generation assumes that a residue with some
       ID in one chain corresponds to the residue with the same ID in another chain. If this is not true then a
       mapping between chain residues is necessary and should be provided using the chain_sequence_mappings
       parameter. chain_sequence_mappings should be a dict from pairs of chain IDs to SequenceMap objects. As all
       sequences are compared with the first chain in chain_ids, only mappings from that first chain to any other
       chain are used.

       This function is useful in certain cases e.g. generating a set of mutations where we make the same mutation
       in both chains of a homodimer or a quasi-homodimer (where we only mutate the positions which agree).
    '''
    residue_ids_to_ignore = set([str(r).strip() for r in residue_ids_to_ignore])
    for k, v in typed_residue_ids_to_ignore.iteritems():
        typed_residue_ids_to_ignore[k] = v.strip()

    assert(len(chain_ids) > 0)
    first_chain = chain_ids[0]
    mutations = []
    if sorted(set(self.atom_sequences.keys()).intersection(set(chain_ids))) == sorted(chain_ids):
        aas = sorted(residue_type_3to1_map.values())
        aas.remove('X')
        sequence = self.atom_sequences[first_chain]
        for res_id in sequence.order:
            chain_res_ids = {}
            for c in chain_ids:
                chain_res_ids[c] = c + res_id[1:]
                if c != first_chain and chain_sequence_mappings.get((first_chain, c)):
                    chain_res_ids[c] = chain_sequence_mappings[(first_chain, c)][res_id]
            sres_id = str(res_id)[1:].strip()

            skip = sres_id in residue_ids_to_ignore
            if not skip and sres_id in typed_residue_ids_to_ignore:
                for c in chain_ids:
                    if chain_res_ids[c] in self.atom_sequences[c].sequence:
                        if not typed_residue_ids_to_ignore[sres_id] == self.atom_sequences[c][chain_res_ids[c]].ResidueAA:
                            raise Exception('Expected to find {0} at residue {1} but found {2} in chain {3} at this position.'.format(typed_residue_ids_to_ignore[sres_id], sres_id, self.atom_sequences[c][chain_res_ids[c]].ResidueAA, c))
                skip = True

            if skip:
                if not silent:
                    print('Skipping residue {0} as requested.'.format(res_id))
                continue

            for c in chain_ids:
                if (chain_res_ids[c]) not in self.atom_sequences[c].sequence:
                    if not silent:
                        print('Skipping residue {0} as it is missing from chain {1}.'.format(res_id, c))
                    skip = True
            if skip:
                continue

            chain_res_aas = set([self.atom_sequences[c][chain_res_ids[c]].ResidueAA for c in chain_ids
                                 if chain_res_ids[c] in self.atom_sequences[c].sequence])
            if len(chain_res_aas) > 1:
                if not silent:
                    colortext.warning('Skipping residue {0} as the amino acid type differs between the specified chains.'.format(res_id))
                continue
            wt_aa = chain_res_aas.pop()
            for mut_aa in aas:
                if mut_aa != wt_aa:
                    mutations.append([ChainMutation(wt_aa, str(chain_res_ids[c])[1:].strip(), mut_aa, Chain=c) for c in chain_ids])
        return mutations
    else:
        raise Exception('Chain(s) {0} could not be found in the PDB file.'.format(', '.join(sorted(set(chain_ids).difference(set(self.atom_sequences.keys()))))))
[ "def", "generate_all_paired_mutations_for_position", "(", "self", ",", "chain_ids", ",", "chain_sequence_mappings", "=", "{", "}", ",", "residue_ids_to_ignore", "=", "[", "]", ",", "typed_residue_ids_to_ignore", "=", "[", "]", ",", "silent", "=", "True", ")", ":", "residue_ids_to_ignore", "=", "set", "(", "[", "str", "(", "r", ")", ".", "strip", "(", ")", "for", "r", "in", "residue_ids_to_ignore", "]", ")", "for", "k", ",", "v", "in", "typed_residue_ids_to_ignore", ".", "iteritems", "(", ")", ":", "typed_residue_ids_to_ignore", "[", "k", "]", "=", "v", ".", "strip", "(", ")", "assert", "(", "len", "(", "chain_ids", ")", ">", "0", ")", "first_chain", "=", "chain_ids", "[", "0", "]", "mutations", "=", "[", "]", "if", "sorted", "(", "set", "(", "self", ".", "atom_sequences", ".", "keys", "(", ")", ")", ".", "intersection", "(", "set", "(", "chain_ids", ")", ")", ")", "==", "sorted", "(", "chain_ids", ")", ":", "aas", "=", "sorted", "(", "residue_type_3to1_map", ".", "values", "(", ")", ")", "aas", ".", "remove", "(", "'X'", ")", "sequence", "=", "self", ".", "atom_sequences", "[", "first_chain", "]", "for", "res_id", "in", "sequence", ".", "order", ":", "chain_res_ids", "=", "{", "}", "for", "c", "in", "chain_ids", ":", "chain_res_ids", "[", "c", "]", "=", "c", "+", "res_id", "[", "1", ":", "]", "if", "c", "!=", "first_chain", "and", "chain_sequence_mappings", ".", "get", "(", "(", "first_chain", ",", "c", ")", ")", ":", "chain_res_ids", "[", "c", "]", "=", "chain_sequence_mappings", "[", "(", "first_chain", ",", "c", ")", "]", "[", "res_id", "]", "sres_id", "=", "str", "(", "res_id", ")", "[", "1", ":", "]", ".", "strip", "(", ")", "skip", "=", "sres_id", "in", "residue_ids_to_ignore", "if", "not", "skip", "and", "sres_id", "in", "typed_residue_ids_to_ignore", ":", "for", "c", "in", "chain_ids", ":", "if", "chain_res_ids", "[", "c", "]", "in", "self", ".", "atom_sequences", "[", "c", "]", ".", "sequence", ":", "if", "not", "typed_residue_ids_to_ignore", "[", "sres_id", "]", "==", "self", ".", "atom_sequences", "[", "c", "]", "[", "chain_res_ids", "[", "c", "]", "]", ".", "ResidueAA", ":", "raise", "Exception", "(", "'Expected to find {0} at residue {1} but found {2} in chain {3} at this position.'", ".", "format", "(", "typed_residue_ids_to_ignore", "[", "sres_id", "]", ",", "sres_id", ",", "self", ".", "atom_sequences", "[", "c", "]", "[", "chain_res_id", "]", ".", "ResidueAA", ",", "c", ")", ")", "skip", "=", "True", "if", "skip", ":", "if", "not", "silent", ":", "print", "(", "'Skipping residue {0} as requested.'", ".", "format", "(", "res_id", ")", ")", "continue", "for", "c", "in", "chain_ids", ":", "if", "(", "chain_res_ids", "[", "c", "]", ")", "not", "in", "self", ".", "atom_sequences", "[", "c", "]", ".", "sequence", ":", "if", "not", "silent", ":", "print", "(", "'Skipping residue {0} as it is missing from chain {1}.'", ".", "format", "(", "res_id", ",", "c", ")", ")", "skip", "=", "True", "if", "skip", ":", "continue", "chain_res_aas", "=", "set", "(", "[", "self", ".", "atom_sequences", "[", "c", "]", "[", "chain_res_ids", "[", "c", "]", "]", ".", "ResidueAA", "for", "c", "in", "chain_ids", "if", "chain_res_ids", "[", "c", "]", "in", "self", ".", "atom_sequences", "[", "c", "]", ".", "sequence", "]", ")", "if", "len", "(", "chain_res_aas", ")", ">", "1", ":", "if", "not", "silent", ":", "colortext", ".", "warning", "(", "'Skipping residue {0} as the amino acid type differs between the specified chains.'", ".", "format", "(", "res_id", ")", ")", "continue", "wt_aa", "=", 
"chain_res_aas", ".", "pop", "(", ")", "for", "mut_aa", "in", "aas", ":", "if", "mut_aa", "!=", "wt_aa", ":", "mutations", ".", "append", "(", "[", "ChainMutation", "(", "wt_aa", ",", "str", "(", "chain_res_ids", "[", "c", "]", ")", "[", "1", ":", "]", ".", "strip", "(", ")", ",", "mut_aa", ",", "Chain", "=", "c", ")", "for", "c", "in", "chain_ids", "]", ")", "return", "mutations", "else", ":", "raise", "Exception", "(", "'Chain(s) {0} could not be found in the PDB file.'", ".", "format", "(", "', '", ".", "join", "(", "sorted", "(", "set", "(", "chain_ids", ")", ".", "difference", "(", "set", "(", "self", ".", "atom_sequences", ".", "keys", "(", ")", ")", ")", ")", ")", ")", ")" ]
Generates a set of mutations for the chains in chain_ids where each set corresponds to the "same" residue (see below)
in both chains and where the wildtype residues match.
e.g. if chain A and B both have K19 then the set of mutations K19A, ... K19I, K19L, K19Y will be included in the
returned results unless 19 is in residue_ids_to_ignore or typed_residue_ids_to_ignore.

residue_ids_to_ignore should be a list/set of residue IDs.
typed_residue_ids_to_ignore should be a dict residue ID -> residue AA. It is used similarly to residue_ids_to_ignore
but we also assert that the residue types match the sequences in the chains.

By default, "same residue" is inferred by residue ID i.e. the generation assumes that a residue with some ID in one
chain corresponds to the residue with the same ID in another chain. If this is not true then a mapping between chain
residues is necessary and should be provided using the chain_sequence_mappings parameter. chain_sequence_mappings
should be a dict from pairs of chain IDs to SequenceMap objects. As all sequences are compared with the first chain
in chain_ids, only mappings from that first chain to any other chain are used.

This function is useful in certain cases e.g. generating a set of mutations where we make the same mutation in both
chains of a homodimer or a quasi-homodimer (where we only mutate the positions which agree).
[ "Generates", "a", "set", "of", "mutations", "for", "the", "chains", "in", "chain_ids", "where", "each", "set", "corresponds", "to", "the", "same", "residue", "(", "see", "below", ")", "in", "both", "chains", "and", "where", "the", "wildtype", "residues", "match", ".", "e", ".", "g", ".", "if", "chain", "A", "and", "B", "both", "have", "K19", "then", "the", "set", "of", "mutations", "K19A", "...", "K19I", "K19L", "K19Y", "will", "be", "included", "in", "in", "the", "returned", "results", "unless", "19", "is", "in", "residue_ids_to_ignore", "or", "typed_residue_ids_to_ignore", ".", "residue_ids_to_ignore", "should", "be", "a", "list", "/", "set", "of", "residue", "IDs", ".", "typed_residue_ids_to_ignore", "should", "be", "a", "dict", "residue", "ID", "-", ">", "residue", "AA", ".", "It", "is", "used", "similarly", "to", "residue_ids_to_ignore", "but", "we", "also", "assert", "that", "the", "residue", "types", "match", "the", "sequences", "in", "the", "chains", "." ]
python
train
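Stripped of the PDB bookkeeping, the combinatorial core is small: for each matched wildtype residue, emit one mutation per alternative amino acid, duplicated across chains. A toy sketch, with ChainMutation replaced by a plain tuple:

AAS = list('ACDEFGHIKLMNPQRSTVWY')

def paired_mutations(wt_by_pos, chains):
    muts = []
    for pos, wt in sorted(wt_by_pos.items()):
        for aa in AAS:
            if aa != wt:
                # One entry per chain: (chain, wildtype, position, mutant)
                muts.append([(c, wt, pos, aa) for c in chains])
    return muts

sets_ = paired_mutations({'19': 'K'}, ['A', 'B'])
print(len(sets_), sets_[0])  # 19 paired sets; the first is K19A applied to chains A and B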
biosustain/optlang
optlang/interface.py
https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/interface.py#L1417-L1452
def update(self, callback=int):
    """Process all pending model modifications."""
    # print(self._pending_modifications)
    add_var = self._pending_modifications.add_var
    if len(add_var) > 0:
        self._add_variables(add_var)
        self._pending_modifications.add_var = []
    callback()

    add_constr = self._pending_modifications.add_constr
    if len(add_constr) > 0:
        self._add_constraints(add_constr)
        self._pending_modifications.add_constr = []

    add_constr_sloppy = self._pending_modifications.add_constr_sloppy
    if len(add_constr_sloppy) > 0:
        self._add_constraints(add_constr_sloppy, sloppy=True)
        self._pending_modifications.add_constr_sloppy = []

    var_lb = self._pending_modifications.var_lb
    var_ub = self._pending_modifications.var_ub
    if len(var_lb) > 0 or len(var_ub) > 0:
        self._set_variable_bounds_on_problem(var_lb, var_ub)
        self._pending_modifications.var_lb = []
        self._pending_modifications.var_ub = []

    rm_var = self._pending_modifications.rm_var
    if len(rm_var) > 0:
        self._remove_variables(rm_var)
        self._pending_modifications.rm_var = []
    callback()

    rm_constr = self._pending_modifications.rm_constr
    if len(rm_constr) > 0:
        self._remove_constraints(rm_constr)
        self._pending_modifications.rm_constr = []
[ "def", "update", "(", "self", ",", "callback", "=", "int", ")", ":", "# print(self._pending_modifications)", "add_var", "=", "self", ".", "_pending_modifications", ".", "add_var", "if", "len", "(", "add_var", ")", ">", "0", ":", "self", ".", "_add_variables", "(", "add_var", ")", "self", ".", "_pending_modifications", ".", "add_var", "=", "[", "]", "callback", "(", ")", "add_constr", "=", "self", ".", "_pending_modifications", ".", "add_constr", "if", "len", "(", "add_constr", ")", ">", "0", ":", "self", ".", "_add_constraints", "(", "add_constr", ")", "self", ".", "_pending_modifications", ".", "add_constr", "=", "[", "]", "add_constr_sloppy", "=", "self", ".", "_pending_modifications", ".", "add_constr_sloppy", "if", "len", "(", "add_constr_sloppy", ")", ">", "0", ":", "self", ".", "_add_constraints", "(", "add_constr_sloppy", ",", "sloppy", "=", "True", ")", "self", ".", "_pending_modifications", ".", "add_constr_sloppy", "=", "[", "]", "var_lb", "=", "self", ".", "_pending_modifications", ".", "var_lb", "var_ub", "=", "self", ".", "_pending_modifications", ".", "var_ub", "if", "len", "(", "var_lb", ")", ">", "0", "or", "len", "(", "var_ub", ")", ">", "0", ":", "self", ".", "_set_variable_bounds_on_problem", "(", "var_lb", ",", "var_ub", ")", "self", ".", "_pending_modifications", ".", "var_lb", "=", "[", "]", "self", ".", "_pending_modifications", ".", "var_ub", "=", "[", "]", "rm_var", "=", "self", ".", "_pending_modifications", ".", "rm_var", "if", "len", "(", "rm_var", ")", ">", "0", ":", "self", ".", "_remove_variables", "(", "rm_var", ")", "self", ".", "_pending_modifications", ".", "rm_var", "=", "[", "]", "callback", "(", ")", "rm_constr", "=", "self", ".", "_pending_modifications", ".", "rm_constr", "if", "len", "(", "rm_constr", ")", ">", "0", ":", "self", ".", "_remove_constraints", "(", "rm_constr", ")", "self", ".", "_pending_modifications", ".", "rm_constr", "=", "[", "]" ]
Process all pending model modifications.
[ "Process", "all", "pending", "model", "modifications", "." ]
python
train
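Note the default callback=int: int() takes no arguments and returns 0, so it doubles as a cheap no-op callable. The drain-in-fixed-order pattern in miniature; Pending and flush below are a toy reduction, not optlang's classes:

class Pending(object):
    def __init__(self):
        self.add_var = []
        self.rm_var = []

def flush(pending, apply_add, apply_rm, callback=int):
    if len(pending.add_var) > 0:
        apply_add(pending.add_var)
        pending.add_var = []
    callback()  # e.g. a progress hook; int() is a no-op default
    if len(pending.rm_var) > 0:
        apply_rm(pending.rm_var)
        pending.rm_var = []

p = Pending()
p.add_var = ['x', 'y']
flush(p, apply_add=lambda vs: print('adding', vs), apply_rm=lambda vs: print('removing', vs))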
lrq3000/pyFileFixity
pyFileFixity/lib/aux_funcs.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L149-L158
def remove_if_exist(path):  # pragma: no cover
    """Delete a file or a directory recursively if it exists, else no exception is raised"""
    if os.path.exists(path):
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
        elif os.path.isfile(path):
            os.remove(path)
            return True
    return False
[ "def", "remove_if_exist", "(", "path", ")", ":", "# pragma: no cover", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")", "return", "True", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")", "return", "True", "return", "False" ]
Delete a file or a directory recursively if it exists, else no exception is raised
[ "Delete", "a", "file", "or", "a", "directory", "recursively", "if", "it", "exists", "else", "no", "exception", "is", "raised" ]
python
train
saltstack/salt
salt/modules/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2109-L2147
def ls_(active=None, cache=True, path=None):
    '''
    Return a list of the containers available on the minion

    path
        path to the container parent directory
        default: /var/lib/lxc (system)

        .. versionadded:: 2015.8.0

    active
        If ``True``, return only active (i.e. running) containers

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' lxc.ls
        salt '*' lxc.ls active=True
    '''
    contextvar = 'lxc.ls{0}'.format(path)
    if active:
        contextvar += '.active'
    if cache and (contextvar in __context__):
        return __context__[contextvar]
    else:
        ret = []
        cmd = 'lxc-ls'
        if path:
            cmd += ' -P {0}'.format(pipes.quote(path))
        if active:
            cmd += ' --active'
        output = __salt__['cmd.run_stdout'](cmd, python_shell=False)
        for line in output.splitlines():
            ret.extend(line.split())
        __context__[contextvar] = ret
        return ret
[ "def", "ls_", "(", "active", "=", "None", ",", "cache", "=", "True", ",", "path", "=", "None", ")", ":", "contextvar", "=", "'lxc.ls{0}'", ".", "format", "(", "path", ")", "if", "active", ":", "contextvar", "+=", "'.active'", "if", "cache", "and", "(", "contextvar", "in", "__context__", ")", ":", "return", "__context__", "[", "contextvar", "]", "else", ":", "ret", "=", "[", "]", "cmd", "=", "'lxc-ls'", "if", "path", ":", "cmd", "+=", "' -P {0}'", ".", "format", "(", "pipes", ".", "quote", "(", "path", ")", ")", "if", "active", ":", "cmd", "+=", "' --active'", "output", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "ret", ".", "extend", "(", "line", ".", "split", "(", ")", ")", "__context__", "[", "contextvar", "]", "=", "ret", "return", "ret" ]
Return a list of the containers available on the minion

path
    path to the container parent directory
    default: /var/lib/lxc (system)

    .. versionadded:: 2015.8.0

active
    If ``True``, return only active (i.e. running) containers

    .. versionadded:: 2015.5.0

CLI Example:

.. code-block:: bash

    salt '*' lxc.ls
    salt '*' lxc.ls active=True
[ "Return", "a", "list", "of", "the", "containers", "available", "on", "the", "minion" ]
python
train
osrg/ryu
ryu/services/protocols/bgp/peer.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/peer.py#L2138-L2150
def _enqueue_init_updates(self):
    """Enqueues current routes to be shared with this peer."""
    assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
    if self.is_mbgp_cap_valid(RF_RTC_UC):
        # Enqueues all best-RTC_NLRIs to be sent as initial update to this
        # peer.
        self._peer_manager.comm_all_rt_nlris(self)
        self._schedule_sending_init_updates()
    else:
        # Enqueues all best-path to be sent as initial update to this peer
        # except for RTC route-family.
        tm = self._core_service.table_manager
        self.comm_all_best_paths(tm.global_tables)
[ "def", "_enqueue_init_updates", "(", "self", ")", ":", "assert", "self", ".", "state", ".", "bgp_state", "==", "const", ".", "BGP_FSM_ESTABLISHED", "if", "self", ".", "is_mbgp_cap_valid", "(", "RF_RTC_UC", ")", ":", "# Enqueues all best-RTC_NLRIs to be sent as initial update to this", "# peer.", "self", ".", "_peer_manager", ".", "comm_all_rt_nlris", "(", "self", ")", "self", ".", "_schedule_sending_init_updates", "(", ")", "else", ":", "# Enqueues all best-path to be sent as initial update to this peer", "# expect for RTC route-family.", "tm", "=", "self", ".", "_core_service", ".", "table_manager", "self", ".", "comm_all_best_paths", "(", "tm", ".", "global_tables", ")" ]
Enqueues current routes to be shared with this peer.
[ "Enqueues", "current", "routes", "to", "be", "shared", "with", "this", "peer", "." ]
python
train
bjoernricks/python-quilt
quilt/push.py
https://github.com/bjoernricks/python-quilt/blob/fae88237f601848cc34d073584d9dcb409f01777/quilt/push.py#L131-L150
def apply_all(self, force=False, quiet=False):
    """ Apply all patches in series file """
    self._check()

    top = self.db.top_patch()
    if top:
        patches = self.series.patches_after(top)
    else:
        patches = self.series.patches()

    if not patches:
        raise AllPatchesApplied(self.series, top)

    try:
        for patch in patches:
            self.applying(patch)
            self._apply_patch(patch, force, quiet)
    finally:
        self.db.save()

    self.applied(self.db.top_patch())
[ "def", "apply_all", "(", "self", ",", "force", "=", "False", ",", "quiet", "=", "False", ")", ":", "self", ".", "_check", "(", ")", "top", "=", "self", ".", "db", ".", "top_patch", "(", ")", "if", "top", ":", "patches", "=", "self", ".", "series", ".", "patches_after", "(", "top", ")", "else", ":", "patches", "=", "self", ".", "series", ".", "patches", "(", ")", "if", "not", "patches", ":", "raise", "AllPatchesApplied", "(", "self", ".", "series", ",", "top", ")", "try", ":", "for", "patch", "in", "patches", ":", "self", ".", "applying", "(", "patch", ")", "self", ".", "_apply_patch", "(", "patch", ",", "force", ",", "quiet", ")", "finally", ":", "self", ".", "db", ".", "save", "(", ")", "self", ".", "applied", "(", "self", ".", "db", ".", "top_patch", "(", ")", ")" ]
Apply all patches in series file
[ "Apply", "all", "patches", "in", "series", "file" ]
python
test
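The try/finally shape is the important part: the applied-patch database is saved even when a patch in the middle of the series fails. The same skeleton in isolation, with hypothetical apply and save callables:

def apply_series(patches, apply_one, save_state):
    try:
        for p in patches:
            apply_one(p)  # may raise on a conflicting patch
    finally:
        save_state()      # always persist what was applied so far

applied = []
apply_series(['a.patch', 'b.patch'],
             apply_one=applied.append,
             save_state=lambda: print('saved:', applied))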
cocaine/cocaine-tools
cocaine/tools/dispatch.py
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1388-L1400
def group_push(name, app, weight, **kwargs):
    """
    Add application with its weight into the routing group.

    Warning: application weight must be a positive integer.
    """
    ctx = Context(**kwargs)
    ctx.execute_action('group:app:add', **{
        'storage': ctx.repo.create_secure_service('storage'),
        'name': name,
        'app': app,
        'weight': weight,
    })
[ "def", "group_push", "(", "name", ",", "app", ",", "weight", ",", "*", "*", "kwargs", ")", ":", "ctx", "=", "Context", "(", "*", "*", "kwargs", ")", "ctx", ".", "execute_action", "(", "'group:app:add'", ",", "*", "*", "{", "'storage'", ":", "ctx", ".", "repo", ".", "create_secure_service", "(", "'storage'", ")", ",", "'name'", ":", "name", ",", "'app'", ":", "app", ",", "'weight'", ":", "weight", ",", "}", ")" ]
Add application with its weight into the routing group.

Warning: application weight must be a positive integer.
[ "Add", "application", "with", "its", "weight", "into", "the", "routing", "group", "." ]
python
train
ryanjdillon/pyotelem
pyotelem/dynamics.py
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/dynamics.py#L34-L53
def pitch(ax, ay, az):
    '''Angle of x-axis relative to ground (theta)

    Args
    ----
    ax: ndarray
        x-axis acceleration values
    ay: ndarray
        y-axis acceleration values
    az: ndarray
        z-axis acceleration values

    Returns
    -------
    pitch: ndarray
        Pitch angle in radians
    '''
    import numpy
    # arctan2 covers the ay = az = 0 case without a division by zero.
    # Note: numpy.arctan's second positional argument is `out`, so
    # arctan(ax, sqrt(...)) would silently compute arctan(ax) instead.
    return numpy.arctan2(ax, numpy.sqrt(ay**2 + az**2))
[ "def", "pitch", "(", "ax", ",", "ay", ",", "az", ")", ":", "import", "numpy", "# arctan2 not needed here to cover all quadrants, just for consistency", "return", "numpy", ".", "arctan", "(", "ax", ",", "numpy", ".", "sqrt", "(", "ay", "**", "2", "+", "az", "**", "2", ")", ")" ]
Angle of x-axis relative to ground (theta)

Args
----
ax: ndarray
    x-axis acceleration values
ay: ndarray
    y-axis acceleration values
az: ndarray
    z-axis acceleration values

Returns
-------
pitch: ndarray
    Pitch angle in radians
[ "Angle", "of", "x", "-", "axis", "relative", "to", "ground", "(", "theta", ")" ]
python
train
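With arctan2 in place, a quick numeric check: ax = 1 g with ay = az = 0 means the x-axis points straight up, so the pitch should be pi/2 (90 degrees):

import numpy

ax, ay, az = 1.0, 0.0, 0.0
pitch_rad = numpy.arctan2(ax, numpy.sqrt(ay**2 + az**2))
print(numpy.degrees(pitch_rad))  # 90.0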
arve0/leicascanningtemplate
leicascanningtemplate/template.py
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L203-L207
def count_of_assigned_jobs(self):
    "Number of fields that have attrib['JobAssigned'] set to true."
    assigned = len([x.attrib['JobAssigned'] for x in self.fields
                    if x.attrib['JobAssigned'] == 'true'])
    return assigned
[ "def", "count_of_assigned_jobs", "(", "self", ")", ":", "assigned", "=", "len", "(", "[", "x", ".", "attrib", "[", "'JobAssigned'", "]", "for", "x", "in", "self", ".", "fields", "if", "x", ".", "attrib", "[", "'JobAssigned'", "]", "==", "'true'", "]", ")", "return", "assigned" ]
Number of fields that have attrib['JobAssigned'] set to true.
[ "Number", "of", "fields", "that", "have", "attrib", "[", "JobAssigned", "]", "set", "to", "true", "." ]
python
train
benjamin-hodgson/Contexts
src/contexts/tools.py
https://github.com/benjamin-hodgson/Contexts/blob/f5ee6a08aed19ab157158c1fc7752cff18cceb91/src/contexts/tools.py#L6-L22
def catch(func, *args, **kwargs):
    """
    Call the supplied function with the supplied arguments,
    catching and returning any exception that it throws.

    Arguments:
        func: the function to run.
        *args: positional arguments to pass into the function.
        **kwargs: keyword arguments to pass into the function.

    Returns:
        If the function throws an exception, return the exception.
        If the function does not throw an exception, return None.
    """
    try:
        func(*args, **kwargs)
    except Exception as e:
        return e
[ "def", "catch", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "return", "e" ]
Call the supplied function with the supplied arguments,
catching and returning any exception that it throws.

Arguments:
    func: the function to run.
    *args: positional arguments to pass into the function.
    **kwargs: keyword arguments to pass into the function.

Returns:
    If the function throws an exception, return the exception.
    If the function does not throw an exception, return None.
[ "Call", "the", "supplied", "function", "with", "the", "supplied", "arguments", "catching", "and", "returning", "any", "exception", "that", "it", "throws", "." ]
python
train
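Typical use is in assertions about expected failures. A self-contained rerun of the helper:

def catch(func, *args, **kwargs):
    try:
        func(*args, **kwargs)
    except Exception as e:
        return e

err = catch(lambda a, b: a / b, 1, 0)
print(type(err).__name__)   # ZeroDivisionError
print(catch(lambda: None))  # None -- no exception was raised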
Nagasaki45/bibo
bibo/internals.py
https://github.com/Nagasaki45/bibo/blob/e6afb28711e78eb11475834d3f9455252ac9f347/bibo/internals.py#L111-L119
def editor(*args, **kwargs):
    '''
    Wrapper for `click.edit` that raises an error when None is returned.
    '''
    result = click.edit(*args, **kwargs)
    if result is None:
        msg = 'Editor exited without saving, command aborted'
        raise click.ClickException(msg)
    return result
[ "def", "editor", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "click", ".", "edit", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "result", "is", "None", ":", "msg", "=", "'Editor exited without saving, command aborted'", "raise", "click", ".", "ClickException", "(", "msg", ")", "return", "result" ]
Wrapper for `click.edit` that raises an error when None is returned.
[ "Wrapper", "for", "click", ".", "edit", "that", "raises", "an", "error", "when", "None", "is", "returned", "." ]
python
train
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L275-L281
def evaluate_tour_Q(self, tour):
    """ Use Cythonized version to evaluate the score of a current tour,
    taking orientation into consideration. This may be the most accurate
    evaluation under the right condition.
    """
    from .chic import score_evaluate_Q
    return score_evaluate_Q(tour, self.active_sizes, self.Q)
[ "def", "evaluate_tour_Q", "(", "self", ",", "tour", ")", ":", "from", ".", "chic", "import", "score_evaluate_Q", "return", "score_evaluate_Q", "(", "tour", ",", "self", ".", "active_sizes", ",", "self", ".", "Q", ")" ]
Use Cythonized version to evaluate the score of a current tour, taking orientation into consideration. This may be the most accurate evaluation under the right condition.
[ "Use", "Cythonized", "version", "to", "evaluate", "the", "score", "of", "a", "current", "tour", "taking", "orientation", "into", "consideration", ".", "This", "may", "be", "the", "most", "accurate", "evaluation", "under", "the", "right", "condition", "." ]
python
train
chrismattmann/tika-python
tika/tika.py
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L448-L476
def detectType1(option, urlOrPath, serverEndpoint=ServerEndpoint, verbose=Verbose,
                tikaServerJar=TikaServerJar, responseMimeType='text/plain',
                services={'type': '/detect/stream'}, config_path=None):
    '''
    Detect the MIME/media type of the stream and return it in text/plain.
    :param option:
    :param urlOrPath:
    :param serverEndpoint:
    :param verbose:
    :param tikaServerJar:
    :param responseMimeType:
    :param services:
    :return:
    '''
    path, mode = getRemoteFile(urlOrPath, TikaFilesPath)
    if option not in services:
        log.exception('Detect option must be one of %s' % binary_string(services.keys()))
        raise TikaException('Detect option must be one of %s' % binary_string(services.keys()))
    service = services[option]
    status, response = callServer('put', serverEndpoint, service, open(path, 'rb'),
                                  {
                                      'Accept': responseMimeType,
                                      'Content-Disposition': make_content_disposition_header(path)
                                  },
                                  verbose, tikaServerJar, config_path=config_path)
    if csvOutput == 1:
        return (status, urlOrPath.decode("UTF-8") + "," + response)
    else:
        return (status, response)
[ "def", "detectType1", "(", "option", ",", "urlOrPath", ",", "serverEndpoint", "=", "ServerEndpoint", ",", "verbose", "=", "Verbose", ",", "tikaServerJar", "=", "TikaServerJar", ",", "responseMimeType", "=", "'text/plain'", ",", "services", "=", "{", "'type'", ":", "'/detect/stream'", "}", ",", "config_path", "=", "None", ")", ":", "path", ",", "mode", "=", "getRemoteFile", "(", "urlOrPath", ",", "TikaFilesPath", ")", "if", "option", "not", "in", "services", ":", "log", ".", "exception", "(", "'Detect option must be one of %s'", "%", "binary_string", "(", "services", ".", "keys", "(", ")", ")", ")", "raise", "TikaException", "(", "'Detect option must be one of %s'", "%", "binary_string", "(", "services", ".", "keys", "(", ")", ")", ")", "service", "=", "services", "[", "option", "]", "status", ",", "response", "=", "callServer", "(", "'put'", ",", "serverEndpoint", ",", "service", ",", "open", "(", "path", ",", "'rb'", ")", ",", "{", "'Accept'", ":", "responseMimeType", ",", "'Content-Disposition'", ":", "make_content_disposition_header", "(", "path", ")", "}", ",", "verbose", ",", "tikaServerJar", ",", "config_path", "=", "config_path", ")", "if", "csvOutput", "==", "1", ":", "return", "(", "status", ",", "urlOrPath", ".", "decode", "(", "\"UTF-8\"", ")", "+", "\",\"", "+", "response", ")", "else", ":", "return", "(", "status", ",", "response", ")" ]
Detect the MIME/media type of the stream and return it in text/plain.
:param option:
:param urlOrPath:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param services:
:return:
[ "Detect", "the", "MIME", "/", "media", "type", "of", "the", "stream", "and", "return", "it", "in", "text", "/", "plain", ".", ":", "param", "option", ":", ":", "param", "urlOrPath", ":", ":", "param", "serverEndpoint", ":", ":", "param", "verbose", ":", ":", "param", "tikaServerJar", ":", ":", "param", "responseMimeType", ":", ":", "param", "services", ":", ":", "return", ":" ]
python
train
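In practice this low-level helper is reached through the package's convenience modules. A hedged usage sketch, assuming the tika package is installed, its tika.detector module is available, and a Tika server is reachable (the package can auto-start one):

# Assumes the `tika` package and a reachable or auto-started Tika server.
from tika import detector

mime = detector.from_file('example.pdf')
print(mime)  # e.g. 'application/pdf'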
molmod/molmod
molmod/ic.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/ic.py#L666-L680
def _dihed_cos_low(a, b, c, deriv):
    """Similar to dihed_cos, but with relative vectors"""
    a = Vector3(9, deriv, a, (0, 1, 2))
    b = Vector3(9, deriv, b, (3, 4, 5))
    c = Vector3(9, deriv, c, (6, 7, 8))
    b /= b.norm()
    tmp = b.copy()
    tmp *= dot(a, b)
    a -= tmp
    tmp = b.copy()
    tmp *= dot(c, b)
    c -= tmp
    a /= a.norm()
    c /= c.norm()
    return dot(a, c).results()
[ "def", "_dihed_cos_low", "(", "a", ",", "b", ",", "c", ",", "deriv", ")", ":", "a", "=", "Vector3", "(", "9", ",", "deriv", ",", "a", ",", "(", "0", ",", "1", ",", "2", ")", ")", "b", "=", "Vector3", "(", "9", ",", "deriv", ",", "b", ",", "(", "3", ",", "4", ",", "5", ")", ")", "c", "=", "Vector3", "(", "9", ",", "deriv", ",", "c", ",", "(", "6", ",", "7", ",", "8", ")", ")", "b", "/=", "b", ".", "norm", "(", ")", "tmp", "=", "b", ".", "copy", "(", ")", "tmp", "*=", "dot", "(", "a", ",", "b", ")", "a", "-=", "tmp", "tmp", "=", "b", ".", "copy", "(", ")", "tmp", "*=", "dot", "(", "c", ",", "b", ")", "c", "-=", "tmp", "a", "/=", "a", ".", "norm", "(", ")", "c", "/=", "c", ".", "norm", "(", ")", "return", "dot", "(", "a", ",", "c", ")", ".", "results", "(", ")" ]
Similar to dihed_cos, but with relative vectors
[ "Similar", "to", "dihed_cos", "but", "with", "relative", "vectors" ]
python
train
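Without the derivative bookkeeping, the dihedral cosine is: project a and c onto the plane normal to b, normalize, and take the dot product. A plain numpy check of the same geometry (dihed_cos here is my sketch, not molmod's API):

import numpy as np

def dihed_cos(a, b, c):
    b = b / np.linalg.norm(b)
    a = a - np.dot(a, b) * b   # remove the component along b
    c = c - np.dot(c, b) * b
    return np.dot(a / np.linalg.norm(a), c / np.linalg.norm(c))

# Perpendicular half-planes -> cosine of the dihedral angle is 0
print(dihed_cos(np.array([1.0, 0.0, 0.0]),
                np.array([0.0, 0.0, 1.0]),
                np.array([0.0, 1.0, 0.0])))  # 0.0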
corpusops/pdbclone
lib/pdb_clone/bdb.py
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/bdb.py#L631-L636
def restart(self):
    """Restart the debugger after source code changes."""
    _module_finder.reset()
    linecache.checkcache()
    for module_bpts in self.breakpoints.values():
        module_bpts.reset()
[ "def", "restart", "(", "self", ")", ":", "_module_finder", ".", "reset", "(", ")", "linecache", ".", "checkcache", "(", ")", "for", "module_bpts", "in", "self", ".", "breakpoints", ".", "values", "(", ")", ":", "module_bpts", ".", "reset", "(", ")" ]
Restart the debugger after source code changes.
[ "Restart", "the", "debugger", "after", "source", "code", "changes", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py#L459-L487
def update_group(self, group, process_id, wit_ref_name, page_id, section_id, group_id):
    """UpdateGroup.
    [Preview API] Updates a group in the work item form.
    :param :class:`<Group> <azure.devops.v5_0.work_item_tracking_process.models.Group>` group: The updated group.
    :param str process_id: The ID of the process.
    :param str wit_ref_name: The reference name of the work item type.
    :param str page_id: The ID of the page the group is in.
    :param str section_id: The ID of the section the group is in.
    :param str group_id: The ID of the group.
    :rtype: :class:`<Group> <azure.devops.v5_0.work_item_tracking_process.models.Group>`
    """
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
    if wit_ref_name is not None:
        route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
    if page_id is not None:
        route_values['pageId'] = self._serialize.url('page_id', page_id, 'str')
    if section_id is not None:
        route_values['sectionId'] = self._serialize.url('section_id', section_id, 'str')
    if group_id is not None:
        route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
    content = self._serialize.body(group, 'Group')
    response = self._send(http_method='PATCH',
                          location_id='766e44e1-36a8-41d7-9050-c343ff02f7a5',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Group', response)
[ "def", "update_group", "(", "self", ",", "group", ",", "process_id", ",", "wit_ref_name", ",", "page_id", ",", "section_id", ",", "group_id", ")", ":", "route_values", "=", "{", "}", "if", "process_id", "is", "not", "None", ":", "route_values", "[", "'processId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'process_id'", ",", "process_id", ",", "'str'", ")", "if", "wit_ref_name", "is", "not", "None", ":", "route_values", "[", "'witRefName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'wit_ref_name'", ",", "wit_ref_name", ",", "'str'", ")", "if", "page_id", "is", "not", "None", ":", "route_values", "[", "'pageId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'page_id'", ",", "page_id", ",", "'str'", ")", "if", "section_id", "is", "not", "None", ":", "route_values", "[", "'sectionId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'section_id'", ",", "section_id", ",", "'str'", ")", "if", "group_id", "is", "not", "None", ":", "route_values", "[", "'groupId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'group_id'", ",", "group_id", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "group", ",", "'Group'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'766e44e1-36a8-41d7-9050-c343ff02f7a5'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'Group'", ",", "response", ")" ]
UpdateGroup.
[Preview API] Updates a group in the work item form.
:param :class:`<Group> <azure.devops.v5_0.work_item_tracking_process.models.Group>` group: The updated group.
:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:param str page_id: The ID of the page the group is in.
:param str section_id: The ID of the section the group is in.
:param str group_id: The ID of the group.
:rtype: :class:`<Group> <azure.devops.v5_0.work_item_tracking_process.models.Group>`
[ "UpdateGroup", ".", "[", "Preview", "API", "]", "Updates", "a", "group", "in", "the", "work", "item", "form", ".", ":", "param", ":", "class", ":", "<Group", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work_item_tracking_process", ".", "models", ".", "Group", ">", "group", ":", "The", "updated", "group", ".", ":", "param", "str", "process_id", ":", "The", "ID", "of", "the", "process", ".", ":", "param", "str", "wit_ref_name", ":", "The", "reference", "name", "of", "the", "work", "item", "type", ".", ":", "param", "str", "page_id", ":", "The", "ID", "of", "the", "page", "the", "group", "is", "in", ".", ":", "param", "str", "section_id", ":", "The", "ID", "of", "the", "section", "the", "group", "is", "in", ".", ":", "param", "str", "group_id", ":", "The", "ID", "of", "the", "group", ".", ":", "rtype", ":", ":", "class", ":", "<Group", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work_item_tracking_process", ".", "models", ".", "Group", ">" ]
python
train
Vital-Fernandez/dazer
bin/lib/Astro_Libraries/f2n.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L841-L861
def writeinfo(self, linelist, colour=None):
    """
    We add a longer chunk of text on the upper left corner of the image.
    Provide linelist, a list of strings that will be written one below the other.
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    self.loadinfofont()
    for i, line in enumerate(linelist):
        topspacing = 5 + (12 + 5) * i
        self.draw.text((10, topspacing), line, fill=colour, font=self.infofont)

    if self.verbose:
        print("I've written some info on the image.")
[ "def", "writeinfo", "(", "self", ",", "linelist", ",", "colour", "=", "None", ")", ":", "self", ".", "checkforpilimage", "(", ")", "colour", "=", "self", ".", "defaultcolour", "(", "colour", ")", "self", ".", "changecolourmode", "(", "colour", ")", "self", ".", "makedraw", "(", ")", "self", ".", "loadinfofont", "(", ")", "for", "i", ",", "line", "in", "enumerate", "(", "linelist", ")", ":", "topspacing", "=", "5", "+", "(", "12", "+", "5", ")", "*", "i", "self", ".", "draw", ".", "text", "(", "(", "10", ",", "topspacing", ")", ",", "line", ",", "fill", "=", "colour", ",", "font", "=", "self", ".", "infofont", ")", "if", "self", ".", "verbose", ":", "print", "\"I've written some info on the image.\"" ]
We add a longer chunk of text on the upper left corner of the image. Provide linelist, a list of strings that will be written one below the other.
[ "We", "add", "a", "longer", "chunk", "of", "text", "on", "the", "upper", "left", "corner", "of", "the", "image", ".", "Provide", "linelist", "a", "list", "of", "strings", "that", "will", "be", "written", "one", "below", "the", "other", "." ]
python
train
fastai/fastai
fastai/vision/tta.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/tta.py#L32-L43
def _TTA(learn:Learner, beta:float=0.4, scale:float=1.35, ds_type:DatasetType=DatasetType.Valid,
         with_loss:bool=False) -> Tensors:
    "Applies TTA to predict on `ds_type` dataset."
    preds, y = learn.get_preds(ds_type)
    all_preds = list(learn.tta_only(scale=scale, ds_type=ds_type))
    avg_preds = torch.stack(all_preds).mean(0)
    if beta is None:
        return preds, avg_preds, y
    else:
        final_preds = preds*beta + avg_preds*(1-beta)
        if with_loss:
            with NoneReduceOnCPU(learn.loss_func) as lf:
                loss = lf(final_preds, y)
            return final_preds, y, loss
        return final_preds, y
[ "def", "_TTA", "(", "learn", ":", "Learner", ",", "beta", ":", "float", "=", "0.4", ",", "scale", ":", "float", "=", "1.35", ",", "ds_type", ":", "DatasetType", "=", "DatasetType", ".", "Valid", ",", "with_loss", ":", "bool", "=", "False", ")", "->", "Tensors", ":", "preds", ",", "y", "=", "learn", ".", "get_preds", "(", "ds_type", ")", "all_preds", "=", "list", "(", "learn", ".", "tta_only", "(", "scale", "=", "scale", ",", "ds_type", "=", "ds_type", ")", ")", "avg_preds", "=", "torch", ".", "stack", "(", "all_preds", ")", ".", "mean", "(", "0", ")", "if", "beta", "is", "None", ":", "return", "preds", ",", "avg_preds", ",", "y", "else", ":", "final_preds", "=", "preds", "*", "beta", "+", "avg_preds", "*", "(", "1", "-", "beta", ")", "if", "with_loss", ":", "with", "NoneReduceOnCPU", "(", "learn", ".", "loss_func", ")", "as", "lf", ":", "loss", "=", "lf", "(", "final_preds", ",", "y", ")", "return", "final_preds", ",", "y", ",", "loss", "return", "final_preds", ",", "y" ]
Applies TTA to predict on `ds_type` dataset.
[ "Applies", "TTA", "to", "predict", "on", "ds_type", "dataset", "." ]
python
train
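The blend is a convex combination, final = beta*preds + (1-beta)*avg_tta, so beta = 0.4 keeps 40% of the plain predictions. A toy check:

import torch

preds = torch.tensor([0.9, 0.1])    # plain predictions
avg_tta = torch.tensor([0.7, 0.3])  # mean over augmented passes
beta = 0.4
final = preds * beta + avg_tta * (1 - beta)
print(final)  # tensor([0.7800, 0.2200])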
Autodesk/aomi
aomi/util.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/util.py#L12-L35
def update_user_password(client, userpass):
    """Will update the password for a userpass user"""
    vault_path = ''
    user = ''
    user_path_bits = userpass.split('/')
    if len(user_path_bits) == 1:
        user = user_path_bits[0]
        vault_path = "auth/userpass/users/%s/password" % user
        LOG.debug("Updating password for user %s at the default path", user)
    elif len(user_path_bits) == 2:
        mount = user_path_bits[0]
        user = user_path_bits[1]
        vault_path = "auth/%s/users/%s/password" % (mount, user)
        LOG.debug("Updating password for user %s at path %s", user, mount)
    else:
        client.revoke_self_token()
        raise aomi.exceptions.AomiCommand("invalid user path")

    new_password = get_password()
    obj = {
        'user': user,
        'password': new_password
    }
    client.write(vault_path, **obj)
[ "def", "update_user_password", "(", "client", ",", "userpass", ")", ":", "vault_path", "=", "''", "user", "=", "''", "user_path_bits", "=", "userpass", ".", "split", "(", "'/'", ")", "if", "len", "(", "user_path_bits", ")", "==", "1", ":", "user", "=", "user_path_bits", "[", "0", "]", "vault_path", "=", "\"auth/userpass/users/%s/password\"", "%", "user", "LOG", ".", "debug", "(", "\"Updating password for user %s at the default path\"", ",", "user", ")", "elif", "len", "(", "user_path_bits", ")", "==", "2", ":", "mount", "=", "user_path_bits", "[", "0", "]", "user", "=", "user_path_bits", "[", "1", "]", "vault_path", "=", "\"auth/%s/users/%s/password\"", "%", "(", "mount", ",", "user", ")", "LOG", ".", "debug", "(", "\"Updating password for user %s at path %s\"", ",", "user", ",", "mount", ")", "else", ":", "client", ".", "revoke_self_token", "(", ")", "raise", "aomi", ".", "exceptions", ".", "AomiCommand", "(", "\"invalid user path\"", ")", "new_password", "=", "get_password", "(", ")", "obj", "=", "{", "'user'", ":", "user", ",", "'password'", ":", "new_password", "}", "client", ".", "write", "(", "vault_path", ",", "*", "*", "obj", ")" ]
Will update the password for a userpass user
[ "Will", "update", "the", "password", "for", "a", "userpass", "user" ]
python
train
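The mount/user split accepts either 'user' or 'mount/user'. The path rule in isolation (userpass_path is my reduction, not aomi's API):

def userpass_path(userpass):
    bits = userpass.split('/')
    if len(bits) == 1:
        return "auth/userpass/users/%s/password" % bits[0]
    if len(bits) == 2:
        return "auth/%s/users/%s/password" % (bits[0], bits[1])
    raise ValueError("invalid user path")

print(userpass_path('alice'))        # auth/userpass/users/alice/password
print(userpass_path('ldap2/alice'))  # auth/ldap2/users/alice/password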
loli/medpy
medpy/metric/binary.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L223-L268
def specificity(result, reference):
    """
    Specificity.

    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.

    Returns
    -------
    specificity : float
        The specificity between two binary datasets, here mostly binary objects in images,
        which denotes the fraction of correctly returned negatives. The
        specificity is not symmetric.

    See also
    --------
    :func:`sensitivity`

    Notes
    -----
    Not symmetric. The complement of the specificity is :func:`sensitivity`.
    High specificity means that an algorithm returned few irrelevant results (false positives).

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
    """
    result = numpy.atleast_1d(result.astype(numpy.bool))
    reference = numpy.atleast_1d(reference.astype(numpy.bool))

    tn = numpy.count_nonzero(~result & ~reference)
    fp = numpy.count_nonzero(result & ~reference)

    try:
        specificity = tn / float(tn + fp)
    except ZeroDivisionError:
        specificity = 0.0

    return specificity
[ "def", "specificity", "(", "result", ",", "reference", ")", ":", "result", "=", "numpy", ".", "atleast_1d", "(", "result", ".", "astype", "(", "numpy", ".", "bool", ")", ")", "reference", "=", "numpy", ".", "atleast_1d", "(", "reference", ".", "astype", "(", "numpy", ".", "bool", ")", ")", "tn", "=", "numpy", ".", "count_nonzero", "(", "~", "result", "&", "~", "reference", ")", "fp", "=", "numpy", ".", "count_nonzero", "(", "result", "&", "~", "reference", ")", "try", ":", "specificity", "=", "tn", "/", "float", "(", "tn", "+", "fp", ")", "except", "ZeroDivisionError", ":", "specificity", "=", "0.0", "return", "specificity" ]
Specificity. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. Returns ------- specificity : float The specificity between two binary datasets, here mostly binary objects in images, which denotes the fraction of correctly returned negatives. The specificity is not symmetric. See also -------- :func:`sensitivity` Notes ----- Not symmetric. The complement of the specificity is :func:`sensitivity`. High specificity means that an algorithm correctly returned most of the negatives. References ---------- .. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
[ "Specificity", ".", "Parameters", "----------", "result", ":", "array_like", "Input", "data", "containing", "objects", ".", "Can", "be", "any", "type", "but", "will", "be", "converted", "into", "binary", ":", "background", "where", "0", "object", "everywhere", "else", ".", "reference", ":", "array_like", "Input", "data", "containing", "objects", ".", "Can", "be", "any", "type", "but", "will", "be", "converted", "into", "binary", ":", "background", "where", "0", "object", "everywhere", "else", ".", "Returns", "-------", "specificity", ":", "float", "The", "specificity", "between", "two", "binary", "datasets", "here", "mostly", "binary", "objects", "in", "images", "which", "denotes", "the", "fraction", "of", "correctly", "returned", "negatives", ".", "The", "specificity", "is", "not", "symmetric", ".", "See", "also", "--------", ":", "func", ":", "sensitivity", "Notes", "-----", "Not", "symmetric", ".", "The", "complement", "of", "the", "specificity", "is", ":", "func", ":", "sensitivity", ".", "High", "specificity", "means", "that", "an", "algorithm", "correctly", "returned", "most", "of", "the", "negatives", ".", "References", "----------", "..", "[", "1", "]", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Sensitivity_and_specificity", "..", "[", "2", "]", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Confusion_matrix#Table_of_confusion" ]
python
train
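The TN / (TN + FP) computation is easy to check on toy arrays with plain numpy; this sketch replicates the arithmetic rather than calling medpy itself.

.. code-block:: python

    import numpy as np

    result = np.array([0, 1, 1, 0, 1], dtype=bool)     # predicted segmentation
    reference = np.array([0, 1, 0, 0, 0], dtype=bool)  # ground truth

    tn = np.count_nonzero(~result & ~reference)  # negatives correctly rejected
    fp = np.count_nonzero(result & ~reference)   # negatives wrongly marked positive
    specificity = tn / float(tn + fp) if (tn + fp) else 0.0
    print(specificity)  # 2 / (2 + 2) = 0.5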
secdev/scapy
scapy/main.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/main.py#L336-L349
def update_session(fname=None): """Update current Scapy session from the file specified in the fname arg. params: - fname: file to load the scapy session from""" if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: s = six.moves.cPickle.load(open(fname, "rb")) scapy_session = six.moves.builtins.__dict__["scapy_session"] scapy_session.update(s) update_ipython_session(scapy_session)
[ "def", "update_session", "(", "fname", "=", "None", ")", ":", "if", "fname", "is", "None", ":", "fname", "=", "conf", ".", "session", "try", ":", "s", "=", "six", ".", "moves", ".", "cPickle", ".", "load", "(", "gzip", ".", "open", "(", "fname", ",", "\"rb\"", ")", ")", "except", "IOError", ":", "s", "=", "six", ".", "moves", ".", "cPickle", ".", "load", "(", "open", "(", "fname", ",", "\"rb\"", ")", ")", "scapy_session", "=", "six", ".", "moves", ".", "builtins", ".", "__dict__", "[", "\"scapy_session\"", "]", "scapy_session", ".", "update", "(", "s", ")", "update_ipython_session", "(", "scapy_session", ")" ]
Update current Scapy session from the file specified in the fname arg. params: - fname: file to load the scapy session from
[ "Update", "current", "Scapy", "session", "from", "the", "file", "specified", "in", "the", "fname", "arg", "." ]
python
train
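The gzip-first, plain-pickle-fallback load used by update_session is a generic pattern; here is a self-contained stdlib sketch of it (file name and payload are placeholders).

.. code-block:: python

    import gzip
    import pickle

    def load_session_dict(fname):
        # Try the gzip-compressed form first, then fall back to a plain
        # pickle, mirroring the try/except in update_session above.
        try:
            with gzip.open(fname, "rb") as fh:
                return pickle.load(fh)
        except OSError:  # not gzip-compressed (scapy catches IOError, its py3 alias)
            with open(fname, "rb") as fh:
                return pickle.load(fh)

    with gzip.open("/tmp/demo_session", "wb") as fh:
        pickle.dump({"a": 1}, fh)
    print(load_session_dict("/tmp/demo_session"))  # {'a': 1}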
gem/oq-engine
openquake/hazardlib/gsim/lin_2009.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/lin_2009.py#L67-L81
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ C = self.COEFFS[imt] mean = ( self._get_magnitude_term(C, rup.mag) + self._get_distance_term(C, rup.mag, dists.rrup) + self._get_style_of_faulting_term(C, rup.rake) + self._get_site_response_term(C, sites.vs30)) stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30)) return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "mean", "=", "(", "self", ".", "_get_magnitude_term", "(", "C", ",", "rup", ".", "mag", ")", "+", "self", ".", "_get_distance_term", "(", "C", ",", "rup", ".", "mag", ",", "dists", ".", "rrup", ")", "+", "self", ".", "_get_style_of_faulting_term", "(", "C", ",", "rup", ".", "rake", ")", "+", "self", ".", "_get_site_response_term", "(", "C", ",", "sites", ".", "vs30", ")", ")", "stddevs", "=", "self", ".", "_get_stddevs", "(", "C", ",", "stddev_types", ",", "len", "(", "sites", ".", "vs30", ")", ")", "return", "mean", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
fuzeman/PyUPnP
pyupnp/logr.py
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/logr.py#L31-L48
def configure(level=logging.WARNING, handler=None, formatter=None): """Configure Logr @param handler: Logger message handler @type handler: logging.Handler or None @param formatter: Logger message Formatter @type formatter: logging.Formatter or None """ if formatter is None: formatter = LogrFormatter() if handler is None: handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(level) Logr.handler = handler
[ "def", "configure", "(", "level", "=", "logging", ".", "WARNING", ",", "handler", "=", "None", ",", "formatter", "=", "None", ")", ":", "if", "formatter", "is", "None", ":", "formatter", "=", "LogrFormatter", "(", ")", "if", "handler", "is", "None", ":", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "handler", ".", "setLevel", "(", "level", ")", "Logr", ".", "handler", "=", "handler" ]
Configure Logr @param handler: Logger message handler @type handler: logging.Handler or None @param formatter: Logger message Formatter @type formatter: logging.Formatter or None
[ "Configure", "Logr" ]
python
train
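The same handler/formatter/level wiring works with the stdlib logger directly; this sketch swaps a stock logging.Formatter in for LogrFormatter, whose layout is not shown in this record.

.. code-block:: python

    import logging

    def configure(level=logging.WARNING, handler=None, formatter=None):
        # Same wiring as Logr.configure, attached to the root logger.
        if formatter is None:
            formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        if handler is None:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.setLevel(level)
        root = logging.getLogger()
        root.addHandler(handler)
        root.setLevel(level)

    configure(level=logging.INFO)
    logging.getLogger(__name__).info("handler configured")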
pydata/xarray
xarray/backends/api.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/api.py#L829-L851
def dump_to_store(dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None): """Store dataset contents to a backends.*DataStore object.""" if writer is None: writer = ArrayWriter() if encoding is None: encoding = {} variables, attrs = conventions.encode_dataset_coordinates(dataset) check_encoding = set() for k, enc in encoding.items(): # no need to shallow copy the variable again; that already happened # in encode_dataset_coordinates variables[k].encoding = enc check_encoding.add(k) if encoder: variables, attrs = encoder(variables, attrs) store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)
[ "def", "dump_to_store", "(", "dataset", ",", "store", ",", "writer", "=", "None", ",", "encoder", "=", "None", ",", "encoding", "=", "None", ",", "unlimited_dims", "=", "None", ")", ":", "if", "writer", "is", "None", ":", "writer", "=", "ArrayWriter", "(", ")", "if", "encoding", "is", "None", ":", "encoding", "=", "{", "}", "variables", ",", "attrs", "=", "conventions", ".", "encode_dataset_coordinates", "(", "dataset", ")", "check_encoding", "=", "set", "(", ")", "for", "k", ",", "enc", "in", "encoding", ".", "items", "(", ")", ":", "# no need to shallow copy the variable again; that already happened", "# in encode_dataset_coordinates", "variables", "[", "k", "]", ".", "encoding", "=", "enc", "check_encoding", ".", "add", "(", "k", ")", "if", "encoder", ":", "variables", ",", "attrs", "=", "encoder", "(", "variables", ",", "attrs", ")", "store", ".", "store", "(", "variables", ",", "attrs", ",", "check_encoding", ",", "writer", ",", "unlimited_dims", "=", "unlimited_dims", ")" ]
Store dataset contents to a backends.*DataStore object.
[ "Store", "dataset", "contents", "to", "a", "backends", ".", "*", "DataStore", "object", "." ]
python
train
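Callers normally reach dump_to_store indirectly through Dataset.to_netcdf, whose encoding argument is what populates the per-variable .encoding attributes above. A minimal sketch, assuming xarray and the netCDF4 backend are installed:

.. code-block:: python

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"t": ("x", np.arange(5.0))})
    # Per-variable encoding is attached before the store step, which is
    # exactly what dump_to_store's loop over the encoding dict does.
    ds.to_netcdf("/tmp/demo.nc", encoding={"t": {"zlib": True, "dtype": "float32"}})
    print(xr.open_dataset("/tmp/demo.nc")["t"].dtype)  # float32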
saltstack/salt
salt/utils/boto3mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/boto3mod.py#L231-L237
def get_region(service, region, profile): """ Retrieve the region for a particular AWS service based on configured region and/or profile. """ _, region, _, _ = _get_profile(service, region, None, None, profile) return region
[ "def", "get_region", "(", "service", ",", "region", ",", "profile", ")", ":", "_", ",", "region", ",", "_", ",", "_", "=", "_get_profile", "(", "service", ",", "region", ",", "None", ",", "None", ",", "profile", ")", "return", "region" ]
Retrieve the region for a particular AWS service based on configured region and/or profile.
[ "Retrieve", "the", "region", "for", "a", "particular", "AWS", "service", "based", "on", "configured", "region", "and", "/", "or", "profile", "." ]
python
train
saltstack/salt
salt/modules/solr.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L996-L1022
def signal(signal=None): ''' Signals Apache Solr to start, stop, or restart. Obviously this is only going to work if the minion resides on the solr host. Additionally Solr doesn't ship with an init script so one must be created. signal : str (None) The command to pass to the apache solr init valid values are 'start', 'stop', and 'restart' CLI Example: .. code-block:: bash salt '*' solr.signal restart ''' valid_signals = ('start', 'stop', 'restart') # Give a friendly error message for invalid signals # TODO: Fix this logic to be reusable and used by apache.signal if signal not in valid_signals: msg = valid_signals[:-1] + ('or {0}'.format(valid_signals[-1]),) return '{0} is an invalid signal. Try: one of: {1}'.format( signal, ', '.join(msg)) cmd = "{0} {1}".format(__opts__['solr.init_script'], signal) __salt__['cmd.run'](cmd, python_shell=False)
[ "def", "signal", "(", "signal", "=", "None", ")", ":", "valid_signals", "=", "(", "'start'", ",", "'stop'", ",", "'restart'", ")", "# Give a friendly error message for invalid signals", "# TODO: Fix this logic to be reusable and used by apache.signal", "if", "signal", "not", "in", "valid_signals", ":", "msg", "=", "valid_signals", "[", ":", "-", "1", "]", "+", "(", "'or {0}'", ".", "format", "(", "valid_signals", "[", "-", "1", "]", ")", ",", ")", "return", "'{0} is an invalid signal. Try: one of: {1}'", ".", "format", "(", "signal", ",", "', '", ".", "join", "(", "msg", ")", ")", "cmd", "=", "\"{0} {1}\"", ".", "format", "(", "__opts__", "[", "'solr.init_script'", "]", ",", "signal", ")", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")" ]
Signals Apache Solr to start, stop, or restart. Obviously this is only going to work if the minion resides on the solr host. Additionally Solr doesn't ship with an init script so one must be created. signal : str (None) The command to pass to the apache solr init valid values are 'start', 'stop', and 'restart' CLI Example: .. code-block:: bash salt '*' solr.signal restart
[ "Signals", "Apache", "Solr", "to", "start", "stop", "or", "restart", ".", "Obviously", "this", "is", "only", "going", "to", "work", "if", "the", "minion", "resides", "on", "the", "solr", "host", ".", "Additionally", "Solr", "doesn", "t", "ship", "with", "an", "init", "script", "so", "one", "must", "be", "created", "." ]
python
train
SHDShim/pytheos
pytheos/eqn_vinet.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_vinet.py#L154-L163
def cal_k_vinet(p, k): """ calculate bulk modulus in GPa :param p: pressure in GPa :param k: [v0, k0, k0p] :return: bulk modulus at high pressure in GPa """ v = cal_v_vinet(p, k) return cal_k_vinet_from_v(v, k[0], k[1], k[2])
[ "def", "cal_k_vinet", "(", "p", ",", "k", ")", ":", "v", "=", "cal_v_vinet", "(", "p", ",", "k", ")", "return", "cal_k_vinet_from_v", "(", "v", ",", "k", "[", "0", "]", ",", "k", "[", "1", "]", ",", "k", "[", "2", "]", ")" ]
calculate bulk modulus in GPa :param p: pressure in GPa :param k: [v0, k0, k0p] :return: bulk modulus at high pressure in GPa
[ "calculate", "bulk", "modulus", "in", "GPa" ]
python
train
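cal_v_vinet and cal_k_vinet_from_v are not shown in this record, so the sketch below re-derives the standard Vinet expressions directly: P(V) is inverted numerically with brentq, and K = -V dP/dV has the closed form used in the second function. The parameter values are illustrative, not from pytheos.

.. code-block:: python

    import numpy as np
    from scipy.optimize import brentq

    def p_vinet(v, v0, k0, k0p):
        # Vinet EOS: P = 3*K0*x**-2*(1-x)*exp(eta*(1-x)), x = (V/V0)**(1/3)
        x = (v / v0) ** (1.0 / 3.0)
        eta = 1.5 * (k0p - 1.0)
        return 3.0 * k0 * x ** -2.0 * (1.0 - x) * np.exp(eta * (1.0 - x))

    def k_vinet_from_v(v, v0, k0, k0p):
        # K = -V dP/dV = K0*x**-2*(2 - x + eta*x*(1-x))*exp(eta*(1-x))
        x = (v / v0) ** (1.0 / 3.0)
        eta = 1.5 * (k0p - 1.0)
        return k0 * x ** -2.0 * (2.0 - x + eta * x * (1.0 - x)) * np.exp(eta * (1.0 - x))

    v0, k0, k0p = 162.4, 260.0, 4.0       # illustrative parameters
    p = 50.0                              # GPa
    v = brentq(lambda vv: p_vinet(vv, v0, k0, k0p) - p, 0.4 * v0, v0)
    print(k_vinet_from_v(v, v0, k0, k0p))  # bulk modulus at 50 GPa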
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/profilehooks.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/profilehooks.py#L617-L622
def mark(self, lineno, count=1): """Mark a given source line as executed count times. Multiple calls to mark for the same lineno add up. """ self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
[ "def", "mark", "(", "self", ",", "lineno", ",", "count", "=", "1", ")", ":", "self", ".", "sourcelines", "[", "lineno", "]", "=", "self", ".", "sourcelines", ".", "get", "(", "lineno", ",", "0", ")", "+", "count" ]
Mark a given source line as executed count times. Multiple calls to mark for the same lineno add up.
[ "Mark", "a", "given", "source", "line", "as", "executed", "count", "times", "." ]
python
train
jayme-github/steam_idle
steam_idle/idle.py
https://github.com/jayme-github/steam_idle/blob/4f9b887fd6c3aea3baa9087f88ee739efcc150cc/steam_idle/idle.py#L53-L79
def calc_delay(remainingDrops): ''' Calculate the idle delay Minimum play time for cards to drop is ~20min again. Except for accounts that requested a refund? Re-check every 15 minutes if there are more than 1 card drops remaining. If only one drop remains, check every 5 minutes ''' global sameDelay, lastDelay # Reset lastDelay for new appids if remainingDrops > 1: lastDelay = 5 sameDelay = 0 if remainingDrops > 2: return 15 * 60 # Check every 15 minutes elif remainingDrops == 2: return 10 * 60 # Check every 10 minutes else: # decrease delay by one minute every two calls if lastDelay > 1: if sameDelay == 2: sameDelay = 0 lastDelay -= 1 sameDelay += 1 return lastDelay * 60
[ "def", "calc_delay", "(", "remainingDrops", ")", ":", "global", "sameDelay", ",", "lastDelay", "# Reset lastDelay for new appids", "if", "remainingDrops", ">", "1", ":", "lastDelay", "=", "5", "sameDelay", "=", "0", "if", "remainingDrops", ">", "2", ":", "return", "15", "*", "60", "# Check every 15 minutes", "elif", "remainingDrops", "==", "2", ":", "return", "10", "*", "60", "# Check every 10 minutes", "else", ":", "# decrease delay by one minute every two calls", "if", "lastDelay", ">", "1", ":", "if", "sameDelay", "==", "2", ":", "sameDelay", "=", "0", "lastDelay", "-=", "1", "sameDelay", "+=", "1", "return", "lastDelay", "*", "60" ]
Calculate the idle delay Minimum play time for cards to drop is ~20min again. Except for accounts that requested a refund? Re-check every 15 minutes if there are more than 1 card drops remaining. If only one drop remains, check every 5 minutes
[ "Calculate", "the", "idle", "delay", "Minimum", "play", "time", "for", "cards", "to", "drop", "is", "~20min", "again", ".", "Except", "for", "accounts", "that", "requested", "a", "refund?" ]
python
train
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L258-L277
def oldapi_request(self, method, endpoint, **kwargs): """Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a response object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ headers = kwargs.setdefault('headers', {}) headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits url = TWITCH_APIURL + endpoint return self.request(method, url, **kwargs)
[ "def", "oldapi_request", "(", "self", ",", "method", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "kwargs", ".", "setdefault", "(", "'headers'", ",", "{", "}", ")", "headers", "[", "'Client-ID'", "]", "=", "CLIENT_ID", "# https://github.com/justintv/Twitch-API#rate-limits", "url", "=", "TWITCH_APIURL", "+", "endpoint", "return", "self", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")" ]
Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a response object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
[ "Make", "a", "request", "to", "one", "of", "the", "old", "api", "endpoints", "." ]
python
train
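The header-injection pattern generalizes to any requests.Session; this sketch uses a placeholder client ID, and note that the kraken ("old") Twitch API this record targets has since been retired, so the call is illustrative only.

.. code-block:: python

    import requests

    TWITCH_APIURL = "https://api.twitch.tv/kraken"  # base URL from the record
    CLIENT_ID = "my-client-id"                      # placeholder, not a real ID

    def oldapi_request(session, method, endpoint, **kwargs):
        # Same pattern as above: force the Client-ID header, then prefix
        # the base URL onto the endpoint.
        headers = kwargs.setdefault("headers", {})
        headers["Client-ID"] = CLIENT_ID
        return session.request(method, TWITCH_APIURL + endpoint, **kwargs)

    response = oldapi_request(requests.Session(), "GET", "/streams")
    print(response.status_code)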
miso-belica/sumy
sumy/summarizers/edmundson_cue.py
https://github.com/miso-belica/sumy/blob/099ab4938e2c1b6a011297375586bac2953641b9/sumy/summarizers/edmundson_cue.py#L32-L50
def _count_words(self, words): """ Counts number of bonus/stigma words. :param iterable words: Collection of words. :returns pair: Tuple with number of words (bonus words, stigma words). """ bonus_words_count = 0 stigma_words_count = 0 for word in words: if word in self._bonus_words: bonus_words_count +=1 if word in self._stigma_words: stigma_words_count += 1 return bonus_words_count, stigma_words_count
[ "def", "_count_words", "(", "self", ",", "words", ")", ":", "bonus_words_count", "=", "0", "stigma_words_count", "=", "0", "for", "word", "in", "words", ":", "if", "word", "in", "self", ".", "_bonus_words", ":", "bonus_words_count", "+=", "1", "if", "word", "in", "self", ".", "_stigma_words", ":", "stigma_words_count", "+=", "1", "return", "bonus_words_count", ",", "stigma_words_count" ]
Counts number of bonus/stigma words. :param iterable words: Collection of words. :returns pair: Tuple with number of words (bonus words, stigma words).
[ "Counts", "number", "of", "bonus", "/", "stigma", "words", "." ]
python
train
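The bonus/stigma tally is a one-pass membership count; a tiny standalone version with made-up word sets:

.. code-block:: python

    bonus_words = {"significant", "key", "conclusion"}  # example sets only
    stigma_words = {"hardly", "impossible"}

    def count_words(words):
        # Increment each counter when the word is in its set, as in
        # the _count_words method above.
        bonus = sum(1 for w in words if w in bonus_words)
        stigma = sum(1 for w in words if w in stigma_words)
        return bonus, stigma

    print(count_words("the key conclusion is hardly surprising".split()))  # (2, 1)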
google/openhtf
openhtf/util/logs.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L294-L315
def format(self, record): """Format the record as tersely as possible but preserve info.""" super(CliFormatter, self).format(record) localized_time = datetime.datetime.fromtimestamp(record.created) terse_time = localized_time.strftime(u'%H:%M:%S') terse_level = record.levelname[0] terse_name = record.name.split('.')[-1] match = RECORD_LOGGER_RE.match(record.name) if match: # Figure out which OpenHTF subsystem the record came from. subsys_match = SUBSYSTEM_LOGGER_RE.match(record.name) if subsys_match: terse_name = '<{subsys}: {id}>'.format( subsys=subsys_match.group('subsys'), id=subsys_match.group('id')) else: # Fall back to using the last five characters of the test UUID. terse_name = '<test %s>' % match.group('test_uid')[-5:] return '{lvl} {time} {logger} - {msg}'.format(lvl=terse_level, time=terse_time, logger=terse_name, msg=record.message)
[ "def", "format", "(", "self", ",", "record", ")", ":", "super", "(", "CliFormatter", ",", "self", ")", ".", "format", "(", "record", ")", "localized_time", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "record", ".", "created", ")", "terse_time", "=", "localized_time", ".", "strftime", "(", "u'%H:%M:%S'", ")", "terse_level", "=", "record", ".", "levelname", "[", "0", "]", "terse_name", "=", "record", ".", "name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "match", "=", "RECORD_LOGGER_RE", ".", "match", "(", "record", ".", "name", ")", "if", "match", ":", "# Figure out which OpenHTF subsystem the record came from.", "subsys_match", "=", "SUBSYSTEM_LOGGER_RE", ".", "match", "(", "record", ".", "name", ")", "if", "subsys_match", ":", "terse_name", "=", "'<{subsys}: {id}>'", ".", "format", "(", "subsys", "=", "subsys_match", ".", "group", "(", "'subsys'", ")", ",", "id", "=", "subsys_match", ".", "group", "(", "'id'", ")", ")", "else", ":", "# Fall back to using the last five characters of the test UUID.", "terse_name", "=", "'<test %s>'", "%", "match", ".", "group", "(", "'test_uid'", ")", "[", "-", "5", ":", "]", "return", "'{lvl} {time} {logger} - {msg}'", ".", "format", "(", "lvl", "=", "terse_level", ",", "time", "=", "terse_time", ",", "logger", "=", "terse_name", ",", "msg", "=", "record", ".", "message", ")" ]
Format the record as tersely as possible but preserve info.
[ "Format", "the", "record", "as", "tersely", "as", "possible", "but", "preserve", "info", "." ]
python
train
Azure/azure-cli-extensions
src/sqlvm-preview/azext_sqlvm_preview/_format.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/sqlvm-preview/azext_sqlvm_preview/_format.py#L41-L68
def transform_sqlvm_output(result): ''' Transforms the result of SQL virtual machine group to eliminate unnecessary parameters. ''' from collections import OrderedDict from msrestazure.tools import parse_resource_id try: resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group'] # Create a dictionary with the relevant parameters output = OrderedDict([('id', result.id), ('location', result.location), ('name', result.name), ('provisioningState', result.provisioning_state), ('sqlImageOffer', result.sql_image_offer), ('sqlImageSku', result.sql_image_sku), ('resourceGroup', resource_group), ('sqlServerLicenseType', result.sql_server_license_type), ('virtualMachineResourceId', result.virtual_machine_resource_id), ('tags', result.tags)]) # Note, wsfcDomainCredentials will not display if result.sql_virtual_machine_group_resource_id is not None: output['sqlVirtualMachineGroupResourceId'] = result.sql_virtual_machine_group_resource_id return output except AttributeError: # Return the response object if the formatting fails return result
[ "def", "transform_sqlvm_output", "(", "result", ")", ":", "from", "collections", "import", "OrderedDict", "from", "msrestazure", ".", "tools", "import", "parse_resource_id", "try", ":", "resource_group", "=", "getattr", "(", "result", ",", "'resource_group'", ",", "None", ")", "or", "parse_resource_id", "(", "result", ".", "id", ")", "[", "'resource_group'", "]", "# Create a dictionary with the relevant parameters", "output", "=", "OrderedDict", "(", "[", "(", "'id'", ",", "result", ".", "id", ")", ",", "(", "'location'", ",", "result", ".", "location", ")", ",", "(", "'name'", ",", "result", ".", "name", ")", ",", "(", "'provisioningState'", ",", "result", ".", "provisioning_state", ")", ",", "(", "'sqlImageOffer'", ",", "result", ".", "sql_image_offer", ")", ",", "(", "'sqlImageSku'", ",", "result", ".", "sql_image_sku", ")", ",", "(", "'resourceGroup'", ",", "resource_group", ")", ",", "(", "'sqlServerLicenseType'", ",", "result", ".", "sql_server_license_type", ")", ",", "(", "'virtualMachineResourceId'", ",", "result", ".", "virtual_machine_resource_id", ")", ",", "(", "'tags'", ",", "result", ".", "tags", ")", "]", ")", "# Note, wsfcDomainCredentials will not display", "if", "result", ".", "sql_virtual_machine_group_resource_id", "is", "not", "None", ":", "output", "[", "'sqlVirtualMachineGroupResourceId'", "]", "=", "result", ".", "sql_virtual_machine_group_resource_id", "return", "output", "except", "AttributeError", ":", "# Return the response object if the formatting fails", "return", "result" ]
Transforms the result of SQL virtual machine group to eliminate unnecessary parameters.
[ "Transforms", "the", "result", "of", "SQL", "virtual", "machine", "group", "to", "eliminate", "unnecessary", "parameters", "." ]
python
train
ChrisCummins/labm8
system.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/system.py#L205-L214
def sed(match, replacement, path, modifiers=""): """ Perform sed text substitution. """ cmd = "sed -r -i 's/%s/%s/%s' %s" % (match, replacement, modifiers, path) process = Subprocess(cmd, shell=True) ret, out, err = process.run(timeout=60) if ret: raise SubprocessError("Sed command failed!")
[ "def", "sed", "(", "match", ",", "replacement", ",", "path", ",", "modifiers", "=", "\"\"", ")", ":", "cmd", "=", "\"sed -r -i 's/%s/%s/%s' %s\"", "%", "(", "match", ",", "replacement", ",", "modifiers", ",", "path", ")", "process", "=", "Subprocess", "(", "cmd", ",", "shell", "=", "True", ")", "ret", ",", "out", ",", "err", "=", "process", ".", "run", "(", "timeout", "=", "60", ")", "if", "ret", ":", "raise", "SubprocessError", "(", "\"Sed command failed!\"", ")" ]
Perform sed text substitution.
[ "Perform", "sed", "text", "substitution", "." ]
python
train
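Shelling out to `sed -r -i` with string interpolation is fragile (quoting, shell injection); a pure-Python equivalent with re.sub does the same in-place substitution. This is a replacement sketch, not labm8's API.

.. code-block:: python

    import re

    def sed(match, replacement, path, flags=0):
        # In-place regex substitution, the pure-Python counterpart of
        # the `sed -r -i` invocation above.
        with open(path) as fh:
            text = fh.read()
        with open(path, "w") as fh:
            fh.write(re.sub(match, replacement, text, flags=flags))

    with open("/tmp/demo.txt", "w") as fh:
        fh.write("foo bar foo")
    sed(r"foo", "baz", "/tmp/demo.txt")
    print(open("/tmp/demo.txt").read())  # baz bar baz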
kalefranz/auxlib
auxlib/_vendor/boltons/timeutils.py
https://github.com/kalefranz/auxlib/blob/6ff2d6b57d128d0b9ed8f01ad83572e938da064f/auxlib/_vendor/boltons/timeutils.py#L122-L161
def parse_timedelta(text): """Robustly parses a short text description of a time period into a :class:`datetime.timedelta`. Supports weeks, days, hours, minutes, and seconds, with or without decimal points: Args: text (str): Text to parse. Returns: datetime.timedelta Raises: ValueError: on parse failure. >>> parse_td('1d 2h 3.5m 0s') datetime.timedelta(1, 7410) Also supports full words and whitespace. >>> parse_td('2 weeks 1 day') datetime.timedelta(15) Negative times are supported, too: >>> parse_td('-1.5 weeks 3m 20s') datetime.timedelta(-11, 43400) """ td_kwargs = {} for match in _PARSE_TD_RE.finditer(text): value, unit = match.group('value'), match.group('unit') try: unit_key = _PARSE_TD_KW_MAP[unit] except KeyError: raise ValueError('invalid time unit %r, expected one of %r' % (unit, _PARSE_TD_KW_MAP.keys())) try: value = float(value) except ValueError: raise ValueError('invalid time value for unit %r: %r' % (unit, value)) td_kwargs[unit_key] = value return timedelta(**td_kwargs)
[ "def", "parse_timedelta", "(", "text", ")", ":", "td_kwargs", "=", "{", "}", "for", "match", "in", "_PARSE_TD_RE", ".", "finditer", "(", "text", ")", ":", "value", ",", "unit", "=", "match", ".", "group", "(", "'value'", ")", ",", "match", ".", "group", "(", "'unit'", ")", "try", ":", "unit_key", "=", "_PARSE_TD_KW_MAP", "[", "unit", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'invalid time unit %r, expected one of %r'", "%", "(", "unit", ",", "_PARSE_TD_KW_MAP", ".", "keys", "(", ")", ")", ")", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'invalid time value for unit %r: %r'", "%", "(", "unit", ",", "value", ")", ")", "td_kwargs", "[", "unit_key", "]", "=", "value", "return", "timedelta", "(", "*", "*", "td_kwargs", ")" ]
Robustly parses a short text description of a time period into a :class:`datetime.timedelta`. Supports weeks, days, hours, minutes, and seconds, with or without decimal points: Args: text (str): Text to parse. Returns: datetime.timedelta Raises: ValueError: on parse failure. >>> parse_td('1d 2h 3.5m 0s') datetime.timedelta(1, 7410) Also supports full words and whitespace. >>> parse_td('2 weeks 1 day') datetime.timedelta(15) Negative times are supported, too: >>> parse_td('-1.5 weeks 3m 20s') datetime.timedelta(-11, 43400)
[ "Robustly", "parses", "a", "short", "text", "description", "of", "a", "time", "period", "into", "a", ":", "class", ":", "datetime", ".", "timedelta", ".", "Supports", "weeks", "days", "hours", "minutes", "and", "seconds", "with", "or", "without", "decimal", "points", ":" ]
python
train
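The private _PARSE_TD_RE and _PARSE_TD_KW_MAP are not included in this record, so the sketch below substitutes its own regex and unit map to reproduce the documented behaviour:

.. code-block:: python

    import re
    from datetime import timedelta

    # Stand-ins for boltons' private regex and keyword map.
    _TD_RE = re.compile(r"(?P<value>-?\d+(?:\.\d+)?)\s*(?P<unit>[a-z]+)")
    _TD_MAP = {"w": "weeks", "week": "weeks", "weeks": "weeks",
               "d": "days", "day": "days", "days": "days",
               "h": "hours", "hour": "hours", "hours": "hours",
               "m": "minutes", "minute": "minutes", "minutes": "minutes",
               "s": "seconds", "second": "seconds", "seconds": "seconds"}

    def parse_td(text):
        kwargs = {}
        for m in _TD_RE.finditer(text.lower()):
            kwargs[_TD_MAP[m.group("unit")]] = float(m.group("value"))
        return timedelta(**kwargs)

    print(parse_td("1d 2h 3.5m 0s"))  # 1 day, 2:03:30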
CalebBell/fluids
fluids/geometry.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L693-L782
def V_vertical_torispherical(D, f, k, h): r'''Calculates volume of a vertical tank with a convex torispherical bottom, according to [1]_. No provision for the top of the tank is made here. .. math:: V_f = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right),\; 0 \le h \le a_1 .. math:: V_f = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right) +\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right] + \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[ \frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4} + \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right] ,\; a_1 < h \le a_1 + a_2 .. math:: V_f = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right) +\frac{\pi t}{2}\left[\left(\frac{D}{2}-kD\right)^2 +s\right] +\frac{\pi t^3}{12} + \pi D(1-2k)\left[\frac{t\sqrt{s}}{4} + \frac{k^2D^2}{2}\sin^{-1}(\cos\alpha)\right] + \frac{\pi D^2}{4}[h-(a_1+a_2)] ,\; a_1 + a_2 < h .. math:: \alpha = \sin^{-1}\frac{1-2k}{2(f-k)} .. math:: a_1 = fD(1-\cos\alpha) .. math:: a_2 = kD\cos\alpha .. math:: D_1 = 2fD\sin\alpha .. math:: s = (kD\sin\alpha)^2 .. math:: t = 2a_2 .. math:: u = h - fD(1-\cos\alpha) Parameters ---------- D : float Diameter of the main cylindrical section, [m] f : float Dish-radius parameter; fD = dish radius [1/m] k : float knuckle-radius parameter ; kD = knuckle radius [1/m] h : float Height, as measured up to where the fluid ends, [m] Returns ------- V : float Volume [m^3] Examples -------- Matching example from [1]_, with inputs in inches and volume in gallons. >>> V_vertical_torispherical(D=132., f=1.0, k=0.06, h=24)/231. 904.0688283793511 References ---------- .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF''' alpha = asin((1-2*k)/(2*(f-k))) a1 = f*D*(1 - cos(alpha)) a2 = k*D*cos(alpha) D1 = 2*f*D*sin(alpha) s = (k*D*sin(alpha))**2 t = 2*a2 u = h - f*D*(1 - cos(alpha)) if 0 <= h <= a1: Vf = pi*h**2/4*(2*a1 + D1**2/2/a1 - 4*h/3) elif a1 < h <= a1 + a2: Vf = (pi/4*(2*a1**3/3 + a1*D1**2/2.) + pi*u*((D/2. - k*D)**2 + s) + pi*t*u**2/2. - pi*u**3/3. + pi*D*(1 - 2*k)*((2*u-t)/4.*(s + t*u - u**2)**0.5 + t*s**0.5/4. + k**2*D**2/2*(acos((t-2*u)/(2*k*D))-alpha))) else: Vf = pi/4*(2*a1**3/3. + a1*D1**2/2.) + pi*t/2.*((D/2 - k*D)**2 + s) + pi*t**3/12. + pi*D*(1 - 2*k)*(t*s**0.5/4 + k**2*D**2/2*asin(cos(alpha))) + pi*D**2/4*(h - (a1 + a2)) return Vf
[ "def", "V_vertical_torispherical", "(", "D", ",", "f", ",", "k", ",", "h", ")", ":", "alpha", "=", "asin", "(", "(", "1", "-", "2", "*", "k", ")", "/", "(", "2", "*", "(", "f", "-", "k", ")", ")", ")", "a1", "=", "f", "*", "D", "*", "(", "1", "-", "cos", "(", "alpha", ")", ")", "a2", "=", "k", "*", "D", "*", "cos", "(", "alpha", ")", "D1", "=", "2", "*", "f", "*", "D", "*", "sin", "(", "alpha", ")", "s", "=", "(", "k", "*", "D", "*", "sin", "(", "alpha", ")", ")", "**", "2", "t", "=", "2", "*", "a2", "u", "=", "h", "-", "f", "*", "D", "*", "(", "1", "-", "cos", "(", "alpha", ")", ")", "if", "0", "<=", "h", "<=", "a1", ":", "Vf", "=", "pi", "*", "h", "**", "2", "/", "4", "*", "(", "2", "*", "a1", "+", "D1", "**", "2", "/", "2", "/", "a1", "-", "4", "*", "h", "/", "3", ")", "elif", "a1", "<", "h", "<=", "a1", "+", "a2", ":", "Vf", "=", "(", "pi", "/", "4", "*", "(", "2", "*", "a1", "**", "3", "/", "3", "+", "a1", "*", "D1", "**", "2", "/", "2.", ")", "+", "pi", "*", "u", "*", "(", "(", "D", "/", "2.", "-", "k", "*", "D", ")", "**", "2", "+", "s", ")", "+", "pi", "*", "t", "*", "u", "**", "2", "/", "2.", "-", "pi", "*", "u", "**", "3", "/", "3.", "+", "pi", "*", "D", "*", "(", "1", "-", "2", "*", "k", ")", "*", "(", "(", "2", "*", "u", "-", "t", ")", "/", "4.", "*", "(", "s", "+", "t", "*", "u", "-", "u", "**", "2", ")", "**", "0.5", "+", "t", "*", "s", "**", "0.5", "/", "4.", "+", "k", "**", "2", "*", "D", "**", "2", "/", "2", "*", "(", "acos", "(", "(", "t", "-", "2", "*", "u", ")", "/", "(", "2", "*", "k", "*", "D", ")", ")", "-", "alpha", ")", ")", ")", "else", ":", "Vf", "=", "pi", "/", "4", "*", "(", "2", "*", "a1", "**", "3", "/", "3.", "+", "a1", "*", "D1", "**", "2", "/", "2.", ")", "+", "pi", "*", "t", "/", "2.", "*", "(", "(", "D", "/", "2", "-", "k", "*", "D", ")", "**", "2", "+", "s", ")", "+", "pi", "*", "t", "**", "3", "/", "12.", "+", "pi", "*", "D", "*", "(", "1", "-", "2", "*", "k", ")", "*", "(", "t", "*", "s", "**", "0.5", "/", "4", "+", "k", "**", "2", "*", "D", "**", "2", "/", "2", "*", "asin", "(", "cos", "(", "alpha", ")", ")", ")", "+", "pi", "*", "D", "**", "2", "/", "4", "*", "(", "h", "-", "(", "a1", "+", "a2", ")", ")", "return", "Vf" ]
r'''Calculates volume of a vertical tank with a convex torispherical bottom, according to [1]_. No provision for the top of the tank is made here. .. math:: V_f = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right),\; 0 \le h \le a_1 .. math:: V_f = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right) +\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right] + \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[ \frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4} + \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right] ,\; a_1 < h \le a_1 + a_2 .. math:: V_f = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right) +\frac{\pi t}{2}\left[\left(\frac{D}{2}-kD\right)^2 +s\right] +\frac{\pi t^3}{12} + \pi D(1-2k)\left[\frac{t\sqrt{s}}{4} + \frac{k^2D^2}{2}\sin^{-1}(\cos\alpha)\right] + \frac{\pi D^2}{4}[h-(a_1+a_2)] ,\; a_1 + a_2 < h .. math:: \alpha = \sin^{-1}\frac{1-2k}{2(f-k)} .. math:: a_1 = fD(1-\cos\alpha) .. math:: a_2 = kD\cos\alpha .. math:: D_1 = 2fD\sin\alpha .. math:: s = (kD\sin\alpha)^2 .. math:: t = 2a_2 .. math:: u = h - fD(1-\cos\alpha) Parameters ---------- D : float Diameter of the main cylindrical section, [m] f : float Dish-radius parameter; fD = dish radius [1/m] k : float knuckle-radius parameter ; kD = knuckle radius [1/m] h : float Height, as measured up to where the fluid ends, [m] Returns ------- V : float Volume [m^3] Examples -------- Matching example from [1]_, with inputs in inches and volume in gallons. >>> V_vertical_torispherical(D=132., f=1.0, k=0.06, h=24)/231. 904.0688283793511 References ---------- .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
[ "r", "Calculates", "volume", "of", "a", "vertical", "tank", "with", "a", "convex", "torispherical", "bottom", "according", "to", "[", "1", "]", "_", ".", "No", "provision", "for", "the", "top", "of", "the", "tank", "is", "made", "here", "." ]
python
train
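The docstring's worked example can be reproduced directly, assuming the fluids package is installed; inputs are in inches and dividing by 231 in^3/gal converts the result to gallons.

.. code-block:: python

    from fluids.geometry import V_vertical_torispherical

    # Worked example from the docstring: D=132 in, f=1.0, k=0.06, h=24 in.
    V = V_vertical_torispherical(D=132., f=1.0, k=0.06, h=24)
    print(V / 231.)  # ~904.07 gallons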
woolfson-group/isambard
isambard/optimisation/optimizer.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/optimizer.py#L89-L103
def comparator_eval(comparator_params): """Gets BUFF score for interaction between two AMPAL objects """ top1, top2, params1, params2, seq1, seq2, movements = comparator_params xrot, yrot, zrot, xtrans, ytrans, ztrans = movements obj1 = top1(*params1) obj2 = top2(*params2) obj2.rotate(xrot, [1, 0, 0]) obj2.rotate(yrot, [0, 1, 0]) obj2.rotate(zrot, [0, 0, 1]) obj2.translate([xtrans, ytrans, ztrans]) model = obj1 + obj2 model.relabel_all() model.pack_new_sequences(seq1 + seq2) return model.buff_interaction_energy.total_energy
[ "def", "comparator_eval", "(", "comparator_params", ")", ":", "top1", ",", "top2", ",", "params1", ",", "params2", ",", "seq1", ",", "seq2", ",", "movements", "=", "comparator_params", "xrot", ",", "yrot", ",", "zrot", ",", "xtrans", ",", "ytrans", ",", "ztrans", "=", "movements", "obj1", "=", "top1", "(", "*", "params1", ")", "obj2", "=", "top2", "(", "*", "params2", ")", "obj2", ".", "rotate", "(", "xrot", ",", "[", "1", ",", "0", ",", "0", "]", ")", "obj2", ".", "rotate", "(", "yrot", ",", "[", "0", ",", "1", ",", "0", "]", ")", "obj2", ".", "rotate", "(", "zrot", ",", "[", "0", ",", "0", ",", "1", "]", ")", "obj2", ".", "translate", "(", "[", "xtrans", ",", "ytrans", ",", "ztrans", "]", ")", "model", "=", "obj1", "+", "obj2", "model", ".", "relabel_all", "(", ")", "model", ".", "pack_new_sequences", "(", "seq1", "+", "seq2", ")", "return", "model", ".", "buff_interaction_energy", ".", "total_energy" ]
Gets BUFF score for interaction between two AMPAL objects
[ "Gets", "BUFF", "score", "for", "interaction", "between", "two", "AMPAL", "objects" ]
python
train
fermiPy/fermipy
fermipy/hpx_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/hpx_utils.py#L122-L155
def make_hpx_to_wcs_mapping_centers(hpx, wcs): """ Make the mapping data needed to go from HPX pixelization to a WCS-based array Parameters ---------- hpx : `~fermipy.hpx_utils.HPX` The healpix mapping (an HPX object) wcs : `~astropy.wcs.WCS` The wcs mapping (a pywcs.wcs object) Returns ------- ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel -1 indicates the wcs pixel does not contain the center of a HEALpix pixel mult_val : array(nx,ny) of 1. npix : tuple(nx,ny) with the shape of the wcs grid """ npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2)) mult_val = np.ones(npix).T.flatten() sky_crds = hpx.get_sky_coords() pix_crds = wcs.wcs_world2pix(sky_crds, 0).astype(int) ipixs = -1 * np.ones(npix, int).T.flatten() pix_index = npix[1] * pix_crds[0:, 0] + pix_crds[0:, 1] if hpx._ipix is None: for ipix, pix_crd in enumerate(pix_index): ipixs[pix_crd] = ipix else: for pix_crd, ipix in zip(pix_index, hpx._ipix): ipixs[pix_crd] = ipix ipixs = ipixs.reshape(npix).T.flatten() return ipixs, mult_val, npix
[ "def", "make_hpx_to_wcs_mapping_centers", "(", "hpx", ",", "wcs", ")", ":", "npix", "=", "(", "int", "(", "wcs", ".", "wcs", ".", "crpix", "[", "0", "]", "*", "2", ")", ",", "int", "(", "wcs", ".", "wcs", ".", "crpix", "[", "1", "]", "*", "2", ")", ")", "mult_val", "=", "np", ".", "ones", "(", "npix", ")", ".", "T", ".", "flatten", "(", ")", "sky_crds", "=", "hpx", ".", "get_sky_coords", "(", ")", "pix_crds", "=", "wcs", ".", "wcs_world2pix", "(", "sky_crds", ",", "0", ")", ".", "astype", "(", "int", ")", "ipixs", "=", "-", "1", "*", "np", ".", "ones", "(", "npix", ",", "int", ")", ".", "T", ".", "flatten", "(", ")", "pix_index", "=", "npix", "[", "1", "]", "*", "pix_crds", "[", "0", ":", ",", "0", "]", "+", "pix_crds", "[", "0", ":", ",", "1", "]", "if", "hpx", ".", "_ipix", "is", "None", ":", "for", "ipix", ",", "pix_crd", "in", "enumerate", "(", "pix_index", ")", ":", "ipixs", "[", "pix_crd", "]", "=", "ipix", "else", ":", "for", "pix_crd", ",", "ipix", "in", "zip", "(", "pix_index", ",", "hpx", ".", "_ipix", ")", ":", "ipixs", "[", "pix_crd", "]", "=", "ipix", "ipixs", "=", "ipixs", ".", "reshape", "(", "npix", ")", ".", "T", ".", "flatten", "(", ")", "return", "ipixs", ",", "mult_val", ",", "npix" ]
Make the mapping data needed to go from HPX pixelization to a WCS-based array Parameters ---------- hpx : `~fermipy.hpx_utils.HPX` The healpix mapping (an HPX object) wcs : `~astropy.wcs.WCS` The wcs mapping (a pywcs.wcs object) Returns ------- ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel -1 indicates the wcs pixel does not contain the center of a HEALpix pixel mult_val : array(nx,ny) of 1. npix : tuple(nx,ny) with the shape of the wcs grid
[ "Make", "the", "mapping", "data", "needed", "to", "go", "from", "HPX", "pixelization", "to", "a", "WCS", "-", "based", "array" ]
python
train
isambard-uob/ampal
src/ampal/naccess.py
https://github.com/isambard-uob/ampal/blob/906e2afacb435ffb129b381f262ff8e7bfb324c5/src/ampal/naccess.py#L8-L19
def naccess_available(): """True if naccess is available on the path.""" available = False try: subprocess.check_output(['naccess'], stderr=subprocess.DEVNULL) except subprocess.CalledProcessError: available = True except FileNotFoundError: print("naccess has not been found on your path. If you have already " "installed naccess but are unsure how to add it to your path, " "check out this: https://stackoverflow.com/a/14638025") return available
[ "def", "naccess_available", "(", ")", ":", "available", "=", "False", "try", ":", "subprocess", ".", "check_output", "(", "[", "'naccess'", "]", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "except", "subprocess", ".", "CalledProcessError", ":", "available", "=", "True", "except", "FileNotFoundError", ":", "print", "(", "\"naccess has not been found on your path. If you have already \"", "\"installed naccess but are unsure how to add it to your path, \"", "\"check out this: https://stackoverflow.com/a/14638025\"", ")", "return", "available" ]
True if naccess is available on the path.
[ "True", "if", "naccess", "is", "available", "on", "the", "path", "." ]
python
train
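Note the inverted-looking try/except above: running `naccess` with no arguments exits non-zero, so a CalledProcessError actually proves the binary exists and ran. When only presence on PATH matters, shutil.which avoids executing the tool at all:

.. code-block:: python

    import shutil

    def tool_available(name):
        # shutil.which returns the resolved path when the executable is
        # on PATH, or None otherwise - nothing gets executed.
        return shutil.which(name) is not None

    print(tool_available("naccess"))
    print(tool_available("python3"))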
adamrehn/ue4cli
ue4cli/UnrealManagerBase.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L427-L449
def runAutomationCommands(self, projectFile, commands, capture=False): ''' Invokes the Automation Test commandlet for the specified project with the supplied automation test commands ''' # IMPORTANT IMPLEMENTATION NOTE: # We need to format the command as a string and execute it using a shell in order to # ensure the "-ExecCmds" argument will be parsed correctly under Windows. This is because # the WinMain() function uses GetCommandLineW() to retrieve the raw command-line string, # rather than using an argv-style structure. The string is then passed to FParse::Value(), # which checks for the presence of a quote character after the equals sign to determine if # whitespace should be stripped or preserved. Without the quote character, the spaces in the # argument payload will be stripped out, corrupting our list of automation commands and # preventing them from executing correctly. command = '{} {}'.format(Utility.escapePathForShell(self.getEditorBinary(True)), Utility.escapePathForShell(projectFile)) command += ' -game -buildmachine -stdout -fullstdoutlogoutput -forcelogflush -unattended -nopause -nullrhi -nosplash' command += ' -ExecCmds="automation {};quit"'.format(';'.join(commands)) if capture == True: return Utility.capture(command, shell=True) else: Utility.run(command, shell=True)
[ "def", "runAutomationCommands", "(", "self", ",", "projectFile", ",", "commands", ",", "capture", "=", "False", ")", ":", "# IMPORTANT IMPLEMENTATION NOTE:", "# We need to format the command as a string and execute it using a shell in order to", "# ensure the \"-ExecCmds\" argument will be parsed correctly under Windows. This is because", "# the WinMain() function uses GetCommandLineW() to retrieve the raw command-line string,", "# rather than using an argv-style structure. The string is then passed to FParse::Value(),", "# which checks for the presence of a quote character after the equals sign to determine if", "# whitespace should be stripped or preserved. Without the quote character, the spaces in the", "# argument payload will be stripped out, corrupting our list of automation commands and", "# preventing them from executing correctly.", "command", "=", "'{} {}'", ".", "format", "(", "Utility", ".", "escapePathForShell", "(", "self", ".", "getEditorBinary", "(", "True", ")", ")", ",", "Utility", ".", "escapePathForShell", "(", "projectFile", ")", ")", "command", "+=", "' -game -buildmachine -stdout -fullstdoutlogoutput -forcelogflush -unattended -nopause -nullrhi -nosplash'", "command", "+=", "' -ExecCmds=\"automation {};quit\"'", ".", "format", "(", "';'", ".", "join", "(", "commands", ")", ")", "if", "capture", "==", "True", ":", "return", "Utility", ".", "capture", "(", "command", ",", "shell", "=", "True", ")", "else", ":", "Utility", ".", "run", "(", "command", ",", "shell", "=", "True", ")" ]
Invokes the Automation Test commandlet for the specified project with the supplied automation test commands
[ "Invokes", "the", "Automation", "Test", "commandlet", "for", "the", "specified", "project", "with", "the", "supplied", "automation", "test", "commands" ]
python
train
twitterdev/search-tweets-python
searchtweets/api_utils.py
https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/api_utils.py#L86-L138
def gen_rule_payload(pt_rule, results_per_call=None, from_date=None, to_date=None, count_bucket=None, tag=None, stringify=True): """ Generates the dict or json payload for a PowerTrack rule. Args: pt_rule (str): The string version of a powertrack rule, e.g., "beyonce has:geo". Accepts multi-line strings for ease of entry. results_per_call (int): number of tweets or counts returned per API call. This maps to the ``maxResults`` search API parameter. Defaults to 500 to reduce API call usage. from_date (str or None): Date format as specified by `convert_utc_time` for the starting time of your search. to_date (str or None): date format as specified by `convert_utc_time` for the end time of your search. count_bucket (str or None): If using the counts api endpoint, will define the count bucket for which tweets are aggregated. stringify (bool): specifies the return type, `dict` or json-formatted `str`. Example: >>> from searchtweets.utils import gen_rule_payload >>> gen_rule_payload("beyonce has:geo", ... from_date="2017-08-21", ... to_date="2017-08-22") '{"query":"beyonce has:geo","maxResults":100,"toDate":"201708220000","fromDate":"201708210000"}' """ pt_rule = ' '.join(pt_rule.split()) # allows multi-line strings payload = {"query": pt_rule} if results_per_call is not None and isinstance(results_per_call, int) is True: payload["maxResults"] = results_per_call if to_date: payload["toDate"] = convert_utc_time(to_date) if from_date: payload["fromDate"] = convert_utc_time(from_date) if count_bucket: if set(["day", "hour", "minute"]) & set([count_bucket]): payload["bucket"] = count_bucket del payload["maxResults"] else: logger.error("invalid count bucket: provided {}" .format(count_bucket)) raise ValueError if tag: payload["tag"] = tag return json.dumps(payload) if stringify else payload
[ "def", "gen_rule_payload", "(", "pt_rule", ",", "results_per_call", "=", "None", ",", "from_date", "=", "None", ",", "to_date", "=", "None", ",", "count_bucket", "=", "None", ",", "tag", "=", "None", ",", "stringify", "=", "True", ")", ":", "pt_rule", "=", "' '", ".", "join", "(", "pt_rule", ".", "split", "(", ")", ")", "# allows multi-line strings", "payload", "=", "{", "\"query\"", ":", "pt_rule", "}", "if", "results_per_call", "is", "not", "None", "and", "isinstance", "(", "results_per_call", ",", "int", ")", "is", "True", ":", "payload", "[", "\"maxResults\"", "]", "=", "results_per_call", "if", "to_date", ":", "payload", "[", "\"toDate\"", "]", "=", "convert_utc_time", "(", "to_date", ")", "if", "from_date", ":", "payload", "[", "\"fromDate\"", "]", "=", "convert_utc_time", "(", "from_date", ")", "if", "count_bucket", ":", "if", "set", "(", "[", "\"day\"", ",", "\"hour\"", ",", "\"minute\"", "]", ")", "&", "set", "(", "[", "count_bucket", "]", ")", ":", "payload", "[", "\"bucket\"", "]", "=", "count_bucket", "del", "payload", "[", "\"maxResults\"", "]", "else", ":", "logger", ".", "error", "(", "\"invalid count bucket: provided {}\"", ".", "format", "(", "count_bucket", ")", ")", "raise", "ValueError", "if", "tag", ":", "payload", "[", "\"tag\"", "]", "=", "tag", "return", "json", ".", "dumps", "(", "payload", ")", "if", "stringify", "else", "payload" ]
Generates the dict or json payload for a PowerTrack rule. Args: pt_rule (str): The string version of a powertrack rule, e.g., "beyonce has:geo". Accepts multi-line strings for ease of entry. results_per_call (int): number of tweets or counts returned per API call. This maps to the ``maxResults`` search API parameter. Defaults to 500 to reduce API call usage. from_date (str or None): Date format as specified by `convert_utc_time` for the starting time of your search. to_date (str or None): date format as specified by `convert_utc_time` for the end time of your search. count_bucket (str or None): If using the counts api endpoint, will define the count bucket for which tweets are aggregated. stringify (bool): specifies the return type, `dict` or json-formatted `str`. Example: >>> from searchtweets.utils import gen_rule_payload >>> gen_rule_payload("beyonce has:geo", ... from_date="2017-08-21", ... to_date="2017-08-22") '{"query":"beyonce has:geo","maxResults":100,"toDate":"201708220000","fromDate":"201708210000"}'
[ "Generates", "the", "dict", "or", "json", "payload", "for", "a", "PowerTrack", "rule", "." ]
python
train
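The docstring's example call can be reproduced directly, assuming the searchtweets package is installed (the function is re-exported at package level):

.. code-block:: python

    from searchtweets import gen_rule_payload

    rule = gen_rule_payload("beyonce has:geo",
                            results_per_call=100,
                            from_date="2017-08-21",
                            to_date="2017-08-22")
    print(rule)
    # {"query": "beyonce has:geo", "maxResults": 100,
    #  "toDate": "201708220000", "fromDate": "201708210000"}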
bcbio/bcbio-nextgen
bcbio/distributed/objectstore.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/objectstore.py#L164-L170
def _chunk_offsets(self): """Iterator over chunk offsets.""" index = 0 blob_size = self.blob_properties.get('content-length') while index < blob_size: yield index index = index + self._chunk_size
[ "def", "_chunk_offsets", "(", "self", ")", ":", "index", "=", "0", "blob_size", "=", "self", ".", "blob_properties", ".", "get", "(", "'content-length'", ")", "while", "index", "<", "blob_size", ":", "yield", "index", "index", "=", "index", "+", "self", ".", "_chunk_size" ]
Iterator over chunk offsets.
[ "Iterator", "over", "chunk", "offsets", "." ]
python
train
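The generator walks start offsets in fixed strides, which for a known blob size is equivalent to a range:

.. code-block:: python

    chunk_size = 4
    blob_size = 10  # pretend content-length of the blob

    # Equivalent of _chunk_offsets for a known size: the start offset
    # of each chunk, including the final partial one.
    offsets = list(range(0, blob_size, chunk_size))
    print(offsets)  # [0, 4, 8]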
i3visio/osrframework
osrframework/utils/general.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L448-L594
def _generateGraphData(data, oldData=nx.Graph()): """ Processing the data from i3visio structures to generate nodes and edges This function uses the networkx graph library. It will create a new node for each and i3visio.<something> entities while it will add properties for all the attribute starting with "@". Args: ----- d: The i3visio structures containing a list of oldData: A graph structure representing the previous information. Returns: -------- A graph structure representing the updated information. """ def _addNewNode(ent, g): """ Wraps the creation of a node Args: ----- ent: The hi3visio-like entities to be used as the identifier. ent = { "value":"i3visio", "type":"i3visio.alias, } g: The graph in which the entity will be stored. Returns: ------- The label used to represent this element. """ try: label = unicode(ent["value"]) except UnicodeEncodeError as e: # Printing that an error was found label = str(ent["value"]) g.add_node(label) g.node[label]["type"] = ent["type"] return label def _processAttributes(elems, g): """ Function that processes a list of elements to obtain new attributes. Args: ----- elems: List of i3visio-like entities. g: The graph in which the entity will be stored. Returns: -------- newAtts: Dict of attributes (to be stored as attributes for the given entity). newEntities: List of new Entities (to be stored as attributes for the given entity). """ newAtts = {} newEntities= [] for att in elems: # If it is an attribute if att["type"][0] == "@": # Removing the @ and the _ of the attributes attName = str(att["type"][1:]).replace('_', '') try: newAtts[attName] = int(att["value"]) except: newAtts[attName] = att["value"] elif att["type"][:8] == "i3visio.": # Creating a dict to represent the pair: type, value entity. ent = { "value":att["value"], "type":att["type"].replace("i3visio.", "i3visio_"), } # Appending the new Entity to the entity list newEntities.append(ent) # Appending the new node hashLabel = _addNewNode(ent, g) # Make this recursive to link the attributes in each and every att newAttsInAttributes, newEntitiesInAttributes = _processAttributes(att["attributes"], g) # Updating the attributes to the current entity g.node[hashLabel].update(newAttsInAttributes) # Creating the edges (the new entities have also been created in the _processAttributes for new in newEntitiesInAttributes: graphData.add_edge(hashLabel, json.dumps(new)) try: # Here, we would add the properties of the edge #graphData.edge[hashLabel][json.dumps(new)]["@times_seen"] +=1 pass except: # If the attribute does not exist, we would initialize it #graphData.edge[hashLabel][json.dumps(new)]["@times_seen"] = 1 pass else: # An unexpected type pass return newAtts, newEntities graphData = oldData # Iterating through the results for elem in data: # Creating a dict to represent the pair: type, value entity. ent = { "value":elem["value"], "type":elem["type"], } # Appending the new node new_node = _addNewNode(ent, graphData) # Processing the attributes to grab the attributes (starting with "@..." 
and entities) newAtts, newEntities = _processAttributes(elem["attributes"], graphData) # Updating the attributes to the current entity graphData.node[new_node].update(newAtts) # Creating the edges (the new entities have also been created in the _processAttributes for other_node in newEntities: # Serializing the second entity serEnt = json.dumps(new_node) try: other_node = unicode(other_node["value"]) except UnicodeEncodeError as e: # Printing that an error was found other_node = str(other_node["value"]) # Adding the edge graphData.add_edge(new_node, other_node) try: # Here, we would add the properties of the edge #graphData.edge[hashLabel][hashLabelSeconds]["times_seen"] +=1 pass except: # If the attribute does not exist, we would initialize it #graphData.edge[hashLabel][hashLabelSeconds]["times_seen"] = 1 pass return graphData
[ "def", "_generateGraphData", "(", "data", ",", "oldData", "=", "nx", ".", "Graph", "(", ")", ")", ":", "def", "_addNewNode", "(", "ent", ",", "g", ")", ":", "\"\"\"\n Wraps the creation of a node\n\n Args:\n -----\n ent: The hi3visio-like entities to be used as the identifier.\n ent = {\n \"value\":\"i3visio\",\n \"type\":\"i3visio.alias,\n }\n g: The graph in which the entity will be stored.\n\n Returns:\n -------\n The label used to represent this element.\n \"\"\"", "try", ":", "label", "=", "unicode", "(", "ent", "[", "\"value\"", "]", ")", "except", "UnicodeEncodeError", "as", "e", ":", "# Printing that an error was found", "label", "=", "str", "(", "ent", "[", "\"value\"", "]", ")", "g", ".", "add_node", "(", "label", ")", "g", ".", "node", "[", "label", "]", "[", "\"type\"", "]", "=", "ent", "[", "\"type\"", "]", "return", "label", "def", "_processAttributes", "(", "elems", ",", "g", ")", ":", "\"\"\"\n Function that processes a list of elements to obtain new attributes.\n\n Args:\n -----\n elems: List of i3visio-like entities.\n g: The graph in which the entity will be stored.\n\n Returns:\n --------\n newAtts: Dict of attributes (to be stored as attributes for the\n given entity).\n newEntities: List of new Entities (to be stored as attributes for\n the given entity).\n \"\"\"", "newAtts", "=", "{", "}", "newEntities", "=", "[", "]", "for", "att", "in", "elems", ":", "# If it is an attribute", "if", "att", "[", "\"type\"", "]", "[", "0", "]", "==", "\"@\"", ":", "# Removing the @ and the _ of the attributes", "attName", "=", "str", "(", "att", "[", "\"type\"", "]", "[", "1", ":", "]", ")", ".", "replace", "(", "'_'", ",", "''", ")", "try", ":", "newAtts", "[", "attName", "]", "=", "int", "(", "att", "[", "\"value\"", "]", ")", "except", ":", "newAtts", "[", "attName", "]", "=", "att", "[", "\"value\"", "]", "elif", "att", "[", "\"type\"", "]", "[", ":", "8", "]", "==", "\"i3visio.\"", ":", "# Creating a dict to represent the pair: type, value entity.", "ent", "=", "{", "\"value\"", ":", "att", "[", "\"value\"", "]", ",", "\"type\"", ":", "att", "[", "\"type\"", "]", ".", "replace", "(", "\"i3visio.\"", ",", "\"i3visio_\"", ")", ",", "}", "# Appending the new Entity to the entity list", "newEntities", ".", "append", "(", "ent", ")", "# Appending the new node", "hashLabel", "=", "_addNewNode", "(", "ent", ",", "g", ")", "# Make this recursive to link the attributes in each and every att", "newAttsInAttributes", ",", "newEntitiesInAttributes", "=", "_processAttributes", "(", "att", "[", "\"attributes\"", "]", ",", "g", ")", "# Updating the attributes to the current entity", "g", ".", "node", "[", "hashLabel", "]", ".", "update", "(", "newAttsInAttributes", ")", "# Creating the edges (the new entities have also been created in the _processAttributes", "for", "new", "in", "newEntitiesInAttributes", ":", "graphData", ".", "add_edge", "(", "hashLabel", ",", "json", ".", "dumps", "(", "new", ")", ")", "try", ":", "# Here, we would add the properties of the edge", "#graphData.edge[hashLabel][json.dumps(new)][\"@times_seen\"] +=1", "pass", "except", ":", "# If the attribute does not exist, we would initialize it", "#graphData.edge[hashLabel][json.dumps(new)][\"@times_seen\"] = 1", "pass", "else", ":", "# An unexpected type", "pass", "return", "newAtts", ",", "newEntities", "graphData", "=", "oldData", "# Iterating through the results", "for", "elem", "in", "data", ":", "# Creating a dict to represent the pair: type, value entity.", "ent", "=", "{", "\"value\"", ":", "elem", "[", 
"\"value\"", "]", ",", "\"type\"", ":", "elem", "[", "\"type\"", "]", ",", "}", "# Appending the new node", "new_node", "=", "_addNewNode", "(", "ent", ",", "graphData", ")", "# Processing the attributes to grab the attributes (starting with \"@...\" and entities)", "newAtts", ",", "newEntities", "=", "_processAttributes", "(", "elem", "[", "\"attributes\"", "]", ",", "graphData", ")", "# Updating the attributes to the current entity", "graphData", ".", "node", "[", "new_node", "]", ".", "update", "(", "newAtts", ")", "# Creating the edges (the new entities have also been created in the _processAttributes", "for", "other_node", "in", "newEntities", ":", "# Serializing the second entity", "serEnt", "=", "json", ".", "dumps", "(", "new_node", ")", "try", ":", "other_node", "=", "unicode", "(", "other_node", "[", "\"value\"", "]", ")", "except", "UnicodeEncodeError", "as", "e", ":", "# Printing that an error was found", "other_node", "=", "str", "(", "other_node", "[", "\"value\"", "]", ")", "# Adding the edge", "graphData", ".", "add_edge", "(", "new_node", ",", "other_node", ")", "try", ":", "# Here, we would add the properties of the edge", "#graphData.edge[hashLabel][hashLabelSeconds][\"times_seen\"] +=1", "pass", "except", ":", "# If the attribute does not exist, we would initialize it", "#graphData.edge[hashLabel][hashLabelSeconds][\"times_seen\"] = 1", "pass", "return", "graphData" ]
Processing the data from i3visio structures to generate nodes and edges

This function uses the networkx graph library. It will create a new node for each i3visio.<something> entity, while adding properties for every attribute starting with "@".

Args:
-----
data: The i3visio structures containing a list of entities.
oldData: A graph structure representing the previous information.

Returns:
--------
A graph structure representing the updated information.
[ "Processing", "the", "data", "from", "i3visio", "structures", "to", "generate", "nodes", "and", "edges" ]
python
train
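A hedged usage sketch for `_generateGraphData` above. The input entities are invented for illustration, and the function's reliance on the legacy `graph.node` accessor ties it to networkx versions before 2.0.

import networkx as nx

data = [
    {
        "value": "i3visio",
        "type": "i3visio.alias",
        "attributes": [
            {"value": "9", "type": "@times_seen", "attributes": []},
            {"value": "twitter", "type": "i3visio.platform", "attributes": []},
        ],
    },
]

graph = _generateGraphData(data, oldData=nx.Graph())
print(graph.nodes())   # ['i3visio', 'twitter']
print(graph.edges())   # [('i3visio', 'twitter')]

Passing `oldData` explicitly, as here, sidesteps the mutable default argument: the `nx.Graph()` default is created once at import time and shared by every call that omits it.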
codenerix/django-codenerix-invoicing
codenerix_invoicing/views_sales.py
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_sales.py#L2190-L2199
def dispatch(self, *args, **kwargs): self.__line_pk = kwargs.get('pk', None) """ if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists(): self.form_class = LineBasketFormPack self.__is_pack = True else: self.__is_pack = False """ return super(LinesUpdateModalBasket, self).dispatch(*args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "__line_pk", "=", "kwargs", ".", "get", "(", "'pk'", ",", "None", ")", "return", "super", "(", "LinesUpdateModalBasket", ",", "self", ")", ".", "dispatch", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists(): self.form_class = LineBasketFormPack self.__is_pack = True else: self.__is_pack = False
[ "if", "SalesLineBasketOption", ".", "objects", ".", "filter", "(", "line_budget__pk", "=", "self", ".", "__line_pk", ")", ".", "exists", "()", ":", "self", ".", "form_class", "=", "LineBasketFormPack", "self", ".", "__is_pack", "=", "True", "else", ":", "self", ".", "__is_pack", "=", "False" ]
python
train
gccxml/pygccxml
pygccxml/declarations/scopedef.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/scopedef.py#L351-L357
def __normalize_args(**keywds): """implementation details""" if isinstance(keywds['name'], Callable) and \ None is keywds['function']: keywds['function'] = keywds['name'] keywds['name'] = None return keywds
[ "def", "__normalize_args", "(", "*", "*", "keywds", ")", ":", "if", "isinstance", "(", "keywds", "[", "'name'", "]", ",", "Callable", ")", "and", "None", "is", "keywds", "[", "'function'", "]", ":", "keywds", "[", "'function'", "]", "=", "keywds", "[", "'name'", "]", "keywds", "[", "'name'", "]", "=", "None", "return", "keywds" ]
implementation details
[ "implementation", "details" ]
python
train
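A hypothetical call illustrating the swap performed by `__normalize_args` above; the predicate is invented, and the double-underscore helper is reachable only from its defining scope.

keywds = __normalize_args(name=lambda decl: decl.name.startswith("get_"),
                          function=None)
assert keywds["name"] is None        # the callable was moved ...
assert callable(keywds["function"])  # ... into the 'function' slot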
thwehner/python-firepit
setup.py
https://github.com/thwehner/python-firepit/blob/68aa3b9c9e034e6a9a3498d997ac8d9a03f8c0f9/setup.py#L12-L20
def get_package_version(): """returns package version without importing it""" base = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(base, "firepit/__init__.py")) as pkg: for line in pkg: m = version.match(line.strip()) if not m: continue return ".".join(m.groups()[0].split(", "))
[ "def", "get_package_version", "(", ")", ":", "base", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "base", ",", "\"firepit/__init__.py\"", ")", ")", "as", "pkg", ":", "for", "line", "in", "pkg", ":", "m", "=", "version", ".", "match", "(", "line", ".", "strip", "(", ")", ")", "if", "not", "m", ":", "continue", "return", "\".\"", ".", "join", "(", "m", ".", "groups", "(", ")", "[", "0", "]", ".", "split", "(", "\", \"", ")", ")" ]
returns package version without importing it
[ "returns", "package", "version", "without", "importing", "it" ]
python
train
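The module-level `version` regex lives elsewhere in setup.py; the pattern below is an assumption, chosen only to be consistent with the `split(", ")` call in the function, and the `__init__.py` line is invented to match.

import re

version = re.compile(r"__version_info__ = \((.*)\)")  # assumed pattern

m = version.match("__version_info__ = (0, 1, 2)")
print(".".join(m.groups()[0].split(", ")))  # -> 0.1.2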
vtkiorg/vtki
vtki/renderer.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L499-L503
def camera_position(self): """ Returns camera position of active render window """ return [self.camera.GetPosition(), self.camera.GetFocalPoint(), self.camera.GetViewUp()]
[ "def", "camera_position", "(", "self", ")", ":", "return", "[", "self", ".", "camera", ".", "GetPosition", "(", ")", ",", "self", ".", "camera", ".", "GetFocalPoint", "(", ")", ",", "self", ".", "camera", ".", "GetViewUp", "(", ")", "]" ]
Returns camera position of active render window
[ "Returns", "camera", "position", "of", "active", "render", "window" ]
python
train
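A hedged one-liner: `renderer` stands in for a vtki Renderer with an active camera, and `camera_position` is assumed to be exposed as a property.

position, focal_point, view_up = renderer.camera_position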
kgori/treeCl
treeCl/collection.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/collection.py#L361-L391
def calc_distances(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1, show_progress=True): """ Calculate fast approximate intra-alignment pairwise distances and variances using ML (requires ML models to have been set up using `calc_trees`). :return: None (all side effects) """ if indices is None: indices = list(range(len(self))) if task_interface is None: task_interface = tasks.MLDistanceTaskInterface() records = [self[i] for i in indices] # Assemble argument lists args, to_delete = task_interface.scrape_args(records) # Dispatch msg = '{} estimation'.format(task_interface.name) if show_progress else '' map_result = jobhandler(task_interface.get_task(), args, msg, batchsize) # Process results with fileIO.TempFileList(to_delete): # pbar = setup_progressbar('Processing results', len(map_result)) # j = 0 # pbar.start() for rec, result in zip(records, map_result): rec.parameters.partitions.distances = result['partitions'][0]['distances'] rec.parameters.partitions.variances = result['partitions'][0]['variances'] rec.parameters.nj_tree = result['nj_tree']
[ "def", "calc_distances", "(", "self", ",", "indices", "=", "None", ",", "task_interface", "=", "None", ",", "jobhandler", "=", "default_jobhandler", ",", "batchsize", "=", "1", ",", "show_progress", "=", "True", ")", ":", "if", "indices", "is", "None", ":", "indices", "=", "list", "(", "range", "(", "len", "(", "self", ")", ")", ")", "if", "task_interface", "is", "None", ":", "task_interface", "=", "tasks", ".", "MLDistanceTaskInterface", "(", ")", "records", "=", "[", "self", "[", "i", "]", "for", "i", "in", "indices", "]", "# Assemble argument lists", "args", ",", "to_delete", "=", "task_interface", ".", "scrape_args", "(", "records", ")", "# Dispatch", "msg", "=", "'{} estimation'", ".", "format", "(", "task_interface", ".", "name", ")", "if", "show_progress", "else", "''", "map_result", "=", "jobhandler", "(", "task_interface", ".", "get_task", "(", ")", ",", "args", ",", "msg", ",", "batchsize", ")", "# Process results", "with", "fileIO", ".", "TempFileList", "(", "to_delete", ")", ":", "# pbar = setup_progressbar('Processing results', len(map_result))", "# j = 0", "# pbar.start()", "for", "rec", ",", "result", "in", "zip", "(", "records", ",", "map_result", ")", ":", "rec", ".", "parameters", ".", "partitions", ".", "distances", "=", "result", "[", "'partitions'", "]", "[", "0", "]", "[", "'distances'", "]", "rec", ".", "parameters", ".", "partitions", ".", "variances", "=", "result", "[", "'partitions'", "]", "[", "0", "]", "[", "'variances'", "]", "rec", ".", "parameters", ".", "nj_tree", "=", "result", "[", "'nj_tree'", "]" ]
Calculate fast approximate intra-alignment pairwise distances and variances using ML (requires ML models to have been set up using `calc_trees`). :return: None (all side effects)
[ "Calculate", "fast", "approximate", "intra", "-", "alignment", "pairwise", "distances", "and", "variances", "using", "ML", "(", "requires", "ML", "models", "to", "have", "been", "set", "up", "using", "calc_trees", ")", ".", ":", "return", ":", "None", "(", "all", "side", "effects", ")" ]
python
train
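A hedged usage sketch; `collection` is assumed to be a treeCl Collection whose records already carry ML parameters (i.e. `calc_trees` has been run), since the distance task scrapes those results.

collection.calc_distances(batchsize=10, show_progress=False)

rec = collection[0]
print(rec.parameters.partitions.distances)  # approximate pairwise distances
print(rec.parameters.partitions.variances)  # matching variance estimates
print(rec.parameters.nj_tree)               # neighbour-joining tree from them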
rootpy/rootpy
rootpy/extern/pyparsing.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/extern/pyparsing.py#L3189-L3213
def matchPreviousExpr(expr): """Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches by expressions, will *not* match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do *not* use with packrat parsing enabled. """ rep = Forward() e2 = expr.copy() rep <<= e2 def copyTokenToRepeater(s,l,t): matchTokens = _flatten(t.asList()) def mustMatchTheseTokens(s,l,t): theseTokens = _flatten(t.asList()) if theseTokens != matchTokens: raise ParseException("",0,"") rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) return rep
[ "def", "matchPreviousExpr", "(", "expr", ")", ":", "rep", "=", "Forward", "(", ")", "e2", "=", "expr", ".", "copy", "(", ")", "rep", "<<=", "e2", "def", "copyTokenToRepeater", "(", "s", ",", "l", ",", "t", ")", ":", "matchTokens", "=", "_flatten", "(", "t", ".", "asList", "(", ")", ")", "def", "mustMatchTheseTokens", "(", "s", ",", "l", ",", "t", ")", ":", "theseTokens", "=", "_flatten", "(", "t", ".", "asList", "(", ")", ")", "if", "theseTokens", "!=", "matchTokens", ":", "raise", "ParseException", "(", "\"\"", ",", "0", ",", "\"\"", ")", "rep", ".", "setParseAction", "(", "mustMatchTheseTokens", ",", "callDuringTry", "=", "True", ")", "expr", ".", "addParseAction", "(", "copyTokenToRepeater", ",", "callDuringTry", "=", "True", ")", "return", "rep" ]
Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousExpr(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches by expressions, will *not* match the leading C{"1:1"} in C{"1:10"}; the expressions are evaluated first, and then compared, so C{"1"} is compared with C{"10"}. Do *not* use with packrat parsing enabled.
[ "Helper", "to", "define", "an", "expression", "that", "is", "indirectly", "defined", "from", "the", "tokens", "matched", "in", "a", "previous", "expression", "that", "is", "it", "looks", "for", "a", "repeat", "of", "a", "previous", "expression", ".", "For", "example", "::", "first", "=", "Word", "(", "nums", ")", "second", "=", "matchPreviousExpr", "(", "first", ")", "matchExpr", "=", "first", "+", ":", "+", "second", "will", "match", "C", "{", "1", ":", "1", "}", "but", "not", "C", "{", "1", ":", "2", "}", ".", "Because", "this", "matches", "by", "expressions", "will", "*", "not", "*", "match", "the", "leading", "C", "{", "1", ":", "1", "}", "in", "C", "{", "1", ":", "10", "}", ";", "the", "expressions", "are", "evaluated", "first", "and", "then", "compared", "so", "C", "{", "1", "}", "is", "compared", "with", "C", "{", "10", "}", ".", "Do", "*", "not", "*", "use", "with", "packrat", "parsing", "enabled", "." ]
python
train
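The docstring's example is runnable against the public pyparsing distribution, which ships the same helper as this vendored copy:

from pyparsing import ParseException, Word, matchPreviousExpr, nums

first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second

print(matchExpr.parseString("1:1"))  # -> ['1', ':', '1']
try:
    matchExpr.parseString("1:10")    # '1' != '10', so the repeat fails
except ParseException:
    print("no match, as documented")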
squaresLab/BugZoo
bugzoo/mgr/container.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/container.py#L262-L272
def is_alive(self, container: Container) -> bool: """ Determines whether a given container is still alive. Returns: `True` if the underlying Docker container for the given BugZoo container is still alive, otherwise `False`. """ uid = container.uid return uid in self.__dockerc and \ self.__dockerc[uid].status == 'running'
[ "def", "is_alive", "(", "self", ",", "container", ":", "Container", ")", "->", "bool", ":", "uid", "=", "container", ".", "uid", "return", "uid", "in", "self", ".", "__dockerc", "and", "self", ".", "__dockerc", "[", "uid", "]", ".", "status", "==", "'running'" ]
Determines whether a given container is still alive. Returns: `True` if the underlying Docker container for the given BugZoo container is still alive, otherwise `False`.
[ "Determines", "whether", "a", "given", "container", "is", "still", "alive", "." ]
python
train
WebarchivCZ/WA-KAT
src/wa_kat/analyzers/keyword_detector.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/analyzers/keyword_detector.py#L76-L94
def get_dc_keywords(index_page):
    """
    Return list of `keywords` parsed from Dublin core.

    Args:
        index_page (str): Content of the page as UTF-8 string

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    keyword_lists = (
        keyword_list.split()
        for keyword_list in parse_meta(index_page, "dc.keywords", "DC")
    )

    return [
        SourceString(keyword, source="DC")
        for keyword in sum(keyword_lists, [])  # flatten the list
    ]
[ "def", "get_dc_keywords", "(", "index_page", ")", ":", "keyword_lists", "=", "(", "keyword_list", ".", "split", "(", ")", "for", "keyword_list", "in", "parse_meta", "(", "index_page", ",", "\"dc.keywords\"", ",", "\"DC\"", ")", ")", "return", "[", "SourceString", "(", "keyword", ",", "source", "=", "\"DC\"", ")", "for", "keyword", "in", "sum", "(", "keyword_lists", ",", "[", "]", ")", "# flattern the list", "]" ]
Return list of `keywords` parsed from Dublin core. Args: index_page (str): Content of the page as UTF-8 string Returns: list: List of :class:`.SourceString` objects.
[ "Return", "list", "of", "keywords", "parsed", "from", "Dublin", "core", "." ]
python
train
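A hedged sketch of the data flow; the meta tag and the exact shape of `parse_meta`'s output are assumptions made for illustration.

# Assuming <meta name="dc.keywords" content="python web archive"> in the page,
# parse_meta() is taken to yield ["python web archive"]; each string is then
# split into words and the per-tag lists are flattened via sum(lists, []).
result = get_dc_keywords(index_page)
# -> [SourceString("python", source="DC"),
#     SourceString("web", source="DC"),
#     SourceString("archive", source="DC")]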
pandas-dev/pandas
pandas/io/excel/_openpyxl.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_openpyxl.py#L38-L56
def _convert_to_style(cls, style_dict): """ converts a style_dict to an openpyxl style object Parameters ---------- style_dict : style dictionary to convert """ from openpyxl.style import Style xls_style = Style() for key, value in style_dict.items(): for nk, nv in value.items(): if key == "borders": (xls_style.borders.__getattribute__(nk) .__setattr__('border_style', nv)) else: xls_style.__getattribute__(key).__setattr__(nk, nv) return xls_style
[ "def", "_convert_to_style", "(", "cls", ",", "style_dict", ")", ":", "from", "openpyxl", ".", "style", "import", "Style", "xls_style", "=", "Style", "(", ")", "for", "key", ",", "value", "in", "style_dict", ".", "items", "(", ")", ":", "for", "nk", ",", "nv", "in", "value", ".", "items", "(", ")", ":", "if", "key", "==", "\"borders\"", ":", "(", "xls_style", ".", "borders", ".", "__getattribute__", "(", "nk", ")", ".", "__setattr__", "(", "'border_style'", ",", "nv", ")", ")", "else", ":", "xls_style", ".", "__getattribute__", "(", "key", ")", ".", "__setattr__", "(", "nk", ",", "nv", ")", "return", "xls_style" ]
converts a style_dict to an openpyxl style object Parameters ---------- style_dict : style dictionary to convert
[ "converts", "a", "style_dict", "to", "an", "openpyxl", "style", "object", "Parameters", "----------", "style_dict", ":", "style", "dictionary", "to", "convert" ]
python
train
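A hypothetical `style_dict` in the nested shape this converter walks. The target is the legacy openpyxl 1.x Style object (note the `openpyxl.style` import), and `writer_cls` below stands in for the writer class the classmethod is defined on.

style_dict = {
    "font": {"bold": True, "size": 12},
    "borders": {"top": "thin", "bottom": "thin"},
}
xls_style = writer_cls._convert_to_style(style_dict)
# For the "borders" key the value lands one level deeper:
#   xls_style.borders.top.border_style == "thin"
# while plain keys map directly:
#   xls_style.font.bold == True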
ui/django-thumbnails
thumbnails/files.py
https://github.com/ui/django-thumbnails/blob/5cef55e7f167060458709ed760dd43981124796a/thumbnails/files.py#L69-L91
def get(self, size, create=True):
    """
    Returns a Thumbnail instance.
    First check whether thumbnail is already cached. If it isn't:
    1. Try to fetch the thumbnail
    2. Create thumbnail if it's not present
    3. Cache the thumbnail for future use
    """
    if self._thumbnails is None:
        self._refresh_cache()
    thumbnail = self._thumbnails.get(size)

    if thumbnail is None:
        thumbnail = images.get(self.source_image.name, size,
                               self.metadata_backend, self.storage)

    if thumbnail is None:
        thumbnail = self.create(size)
        self._thumbnails[size] = thumbnail

    return thumbnail
[ "def", "get", "(", "self", ",", "size", ",", "create", "=", "True", ")", ":", "if", "self", ".", "_thumbnails", "is", "None", ":", "self", ".", "_refresh_cache", "(", ")", "thumbnail", "=", "self", ".", "_thumbnails", ".", "get", "(", "size", ")", "if", "thumbnail", "is", "None", ":", "thumbnail", "=", "images", ".", "get", "(", "self", ".", "source_image", ".", "name", ",", "size", ",", "self", ".", "metadata_backend", ",", "self", ".", "storage", ")", "if", "thumbnail", "is", "None", ":", "thumbnail", "=", "self", ".", "create", "(", "size", ")", "self", ".", "_thumbnails", "[", "size", "]", "=", "thumbnail", "return", "thumbnail" ]
Returns a Thumbnail instance.
First check whether thumbnail is already cached. If it isn't:
1. Try to fetch the thumbnail
2. Create thumbnail if it's not present
3. Cache the thumbnail for future use
[ "Returns", "a", "Thumbnail", "instance", ".", "First", "check", "whether", "thumbnail", "is", "already", "cached", ".", "If", "it", "doesn", "t", ":", "1", ".", "Try", "to", "fetch", "the", "thumbnail", "2", ".", "Create", "thumbnail", "if", "it", "s", "not", "present", "3", ".", "Cache", "the", "thumbnail", "for", "future", "use" ]
python
test
projectshift/shift-schema
shiftschema/validators/email.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/validators/email.py#L45-L74
def regex(self): """ RFC822 Email Address Regex Originally written by Cal Henderson c.f. http://iamcal.com/publish/articles/php/parsing_email/ Translated to Python by Tim Fletcher with changes suggested by Dan Kubb http://tfletcher.com/lib/rfc822.py Licensed under a Creative Commons Attribution-ShareAlike 2.5 License http://creativecommons.org/licenses/by-sa/2.5/ :return: """ qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]' dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]' atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40' atom += '\\x5b-\\x5d\\x7f-\\xff]+' quoted_pair = '\\x5c[\\x00-\\x7f]' domain_literal = "\\x5b(?:%s|%s)*\\x5d" % (dtext, quoted_pair) quoted_string = "\\x22(?:%s|%s)*\\x22" % (qtext, quoted_pair) domain_ref = atom sub_domain = "(?:%s|%s)" % (domain_ref, domain_literal) word = "(?:%s|%s)" % (atom, quoted_string) domain = "%s(?:\\x2e%s)*" % (sub_domain, sub_domain) local_part = "%s(?:\\x2e%s)*" % (word, word) addr_spec = "%s\\x40%s" % (local_part, domain) email_address = re.compile('\A%s\Z' % addr_spec) return email_address
[ "def", "regex", "(", "self", ")", ":", "qtext", "=", "'[^\\\\x0d\\\\x22\\\\x5c\\\\x80-\\\\xff]'", "dtext", "=", "'[^\\\\x0d\\\\x5b-\\\\x5d\\\\x80-\\\\xff]'", "atom", "=", "'[^\\\\x00-\\\\x20\\\\x22\\\\x28\\\\x29\\\\x2c\\\\x2e\\\\x3a-\\\\x3c\\\\x3e\\\\x40'", "atom", "+=", "'\\\\x5b-\\\\x5d\\\\x7f-\\\\xff]+'", "quoted_pair", "=", "'\\\\x5c[\\\\x00-\\\\x7f]'", "domain_literal", "=", "\"\\\\x5b(?:%s|%s)*\\\\x5d\"", "%", "(", "dtext", ",", "quoted_pair", ")", "quoted_string", "=", "\"\\\\x22(?:%s|%s)*\\\\x22\"", "%", "(", "qtext", ",", "quoted_pair", ")", "domain_ref", "=", "atom", "sub_domain", "=", "\"(?:%s|%s)\"", "%", "(", "domain_ref", ",", "domain_literal", ")", "word", "=", "\"(?:%s|%s)\"", "%", "(", "atom", ",", "quoted_string", ")", "domain", "=", "\"%s(?:\\\\x2e%s)*\"", "%", "(", "sub_domain", ",", "sub_domain", ")", "local_part", "=", "\"%s(?:\\\\x2e%s)*\"", "%", "(", "word", ",", "word", ")", "addr_spec", "=", "\"%s\\\\x40%s\"", "%", "(", "local_part", ",", "domain", ")", "email_address", "=", "re", ".", "compile", "(", "'\\A%s\\Z'", "%", "addr_spec", ")", "return", "email_address" ]
RFC822 Email Address Regex Originally written by Cal Henderson c.f. http://iamcal.com/publish/articles/php/parsing_email/ Translated to Python by Tim Fletcher with changes suggested by Dan Kubb http://tfletcher.com/lib/rfc822.py Licensed under a Creative Commons Attribution-ShareAlike 2.5 License http://creativecommons.org/licenses/by-sa/2.5/ :return:
[ "RFC822", "Email", "Address", "Regex", "Originally", "written", "by", "Cal", "Henderson", "c", ".", "f", ".", "http", ":", "//", "iamcal", ".", "com", "/", "publish", "/", "articles", "/", "php", "/", "parsing_email", "/", "Translated", "to", "Python", "by", "Tim", "Fletcher", "with", "changes", "suggested", "by", "Dan", "Kubb", "http", ":", "//", "tfletcher", ".", "com", "/", "lib", "/", "rfc822", ".", "py", "Licensed", "under", "a", "Creative", "Commons", "Attribution", "-", "ShareAlike", "2", ".", "5", "License", "http", ":", "//", "creativecommons", ".", "org", "/", "licenses", "/", "by", "-", "sa", "/", "2", ".", "5", "/", ":", "return", ":" ]
python
train
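A hedged usage sketch; the validator class name is inferred from the module path (shiftschema/validators/email.py), and `regex` is called here as the plain method defined above.

pattern = Email().regex()
print(bool(pattern.match("user@example.com")))  # True
print(bool(pattern.match("not an email")))      # False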
mixmastamyk/console
console/utils.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/utils.py#L122-L144
def strip_ansi(text, c1=False, osc=False):
    ''' Strip ANSI escape sequences from a portion of text.
        https://stackoverflow.com/a/38662876/450917

        Arguments:
            text: str
            osc: bool - include OSC commands in the strippage.
            c1: bool - include C1 commands in the strippage.

        Notes:
            Enabling c1 and osc stripping is less efficient and the two options
            can mildly conflict with one another.
            The less problematic order was chosen, so there may still be rare
            C1 OSC fragments left over.
    '''
    text = ansi_csi0_finder.sub('', text)
    if osc:
        text = ansi_osc0_finder.sub('', text)
    if c1:
        text = ansi_csi1_finder.sub('', text)  # go first, less destructive
        if osc:
            text = ansi_osc1_finder.sub('', text)
    return text
[ "def", "strip_ansi", "(", "text", ",", "c1", "=", "False", ",", "osc", "=", "False", ")", ":", "text", "=", "ansi_csi0_finder", ".", "sub", "(", "''", ",", "text", ")", "if", "osc", ":", "text", "=", "ansi_osc0_finder", ".", "sub", "(", "''", ",", "text", ")", "if", "c1", ":", "text", "=", "ansi_csi1_finder", ".", "sub", "(", "''", ",", "text", ")", "# go first, less destructive", "if", "osc", ":", "text", "=", "ansi_osc1_finder", ".", "sub", "(", "''", ",", "text", ")", "return", "text" ]
Strip ANSI escape sequences from a portion of text.
https://stackoverflow.com/a/38662876/450917

Arguments:
    text: str
    osc: bool - include OSC commands in the strippage.
    c1: bool - include C1 commands in the strippage.

Notes:
    Enabling c1 and osc stripping is less efficient and the two options
    can mildly conflict with one another.
    The less problematic order was chosen, so there may still be rare
    C1 OSC fragments left over.
[ "Strip", "ANSI", "escape", "sequences", "from", "a", "portion", "of", "text", ".", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "38662876", "/", "450917" ]
python
train
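Runnable against the console package itself; stripping CSI color codes is the default behaviour handled by the first substitution in the function.

from console.utils import strip_ansi

colored = '\x1b[31mred\x1b[0m and plain'
print(strip_ansi(colored))  # -> 'red and plain'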
hardbyte/python-can
can/interfaces/socketcan/socketcan.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/socketcan/socketcan.py#L78-L118
def build_can_frame(msg): """ CAN frame packing/unpacking (see 'struct can_frame' in <linux/can.h>) /** * struct can_frame - basic CAN frame structure * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above. * @can_dlc: the data length field of the CAN frame * @data: the CAN frame payload. */ struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 can_dlc; /* data length code: 0 .. 8 */ __u8 data[8] __attribute__((aligned(8))); }; /** * struct canfd_frame - CAN flexible data rate frame structure * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition * @len: frame payload length in byte (0 .. CANFD_MAX_DLEN) * @flags: additional flags for CAN FD * @__res0: reserved / padding * @__res1: reserved / padding * @data: CAN FD frame payload (up to CANFD_MAX_DLEN byte) */ struct canfd_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 len; /* frame payload length in byte */ __u8 flags; /* additional flags for CAN FD */ __u8 __res0; /* reserved / padding */ __u8 __res1; /* reserved / padding */ __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8))); }; """ can_id = _add_flags_to_can_id(msg) flags = 0 if msg.bitrate_switch: flags |= CANFD_BRS if msg.error_state_indicator: flags |= CANFD_ESI max_len = 64 if msg.is_fd else 8 data = bytes(msg.data).ljust(max_len, b'\x00') return CAN_FRAME_HEADER_STRUCT.pack(can_id, msg.dlc, flags) + data
[ "def", "build_can_frame", "(", "msg", ")", ":", "can_id", "=", "_add_flags_to_can_id", "(", "msg", ")", "flags", "=", "0", "if", "msg", ".", "bitrate_switch", ":", "flags", "|=", "CANFD_BRS", "if", "msg", ".", "error_state_indicator", ":", "flags", "|=", "CANFD_ESI", "max_len", "=", "64", "if", "msg", ".", "is_fd", "else", "8", "data", "=", "bytes", "(", "msg", ".", "data", ")", ".", "ljust", "(", "max_len", ",", "b'\\x00'", ")", "return", "CAN_FRAME_HEADER_STRUCT", ".", "pack", "(", "can_id", ",", "msg", ".", "dlc", ",", "flags", ")", "+", "data" ]
CAN frame packing/unpacking (see 'struct can_frame' in <linux/can.h>) /** * struct can_frame - basic CAN frame structure * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above. * @can_dlc: the data length field of the CAN frame * @data: the CAN frame payload. */ struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 can_dlc; /* data length code: 0 .. 8 */ __u8 data[8] __attribute__((aligned(8))); }; /** * struct canfd_frame - CAN flexible data rate frame structure * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition * @len: frame payload length in byte (0 .. CANFD_MAX_DLEN) * @flags: additional flags for CAN FD * @__res0: reserved / padding * @__res1: reserved / padding * @data: CAN FD frame payload (up to CANFD_MAX_DLEN byte) */ struct canfd_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 len; /* frame payload length in byte */ __u8 flags; /* additional flags for CAN FD */ __u8 __res0; /* reserved / padding */ __u8 __res1; /* reserved / padding */ __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8))); };
[ "CAN", "frame", "packing", "/", "unpacking", "(", "see", "struct", "can_frame", "in", "<linux", "/", "can", ".", "h", ">", ")", "/", "**", "*", "struct", "can_frame", "-", "basic", "CAN", "frame", "structure", "*", "@can_id", ":", "the", "CAN", "ID", "of", "the", "frame", "and", "CAN_", "*", "_FLAG", "flags", "see", "above", ".", "*", "@can_dlc", ":", "the", "data", "length", "field", "of", "the", "CAN", "frame", "*", "@data", ":", "the", "CAN", "frame", "payload", ".", "*", "/", "struct", "can_frame", "{", "canid_t", "can_id", ";", "/", "*", "32", "bit", "CAN_ID", "+", "EFF", "/", "RTR", "/", "ERR", "flags", "*", "/", "__u8", "can_dlc", ";", "/", "*", "data", "length", "code", ":", "0", "..", "8", "*", "/", "__u8", "data", "[", "8", "]", "__attribute__", "((", "aligned", "(", "8", ")))", ";", "}", ";" ]
python
train
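A hedged sketch: `Message` is python-can's public class, while `build_can_frame` is an internal helper of the socketcan backend; the 16-byte result assumes the kernel's classic `struct can_frame` layout (an 8-byte header followed by 8 data bytes).

import can

msg = can.Message(arbitration_id=0x123,
                  data=[0x11, 0x22, 0x33],
                  is_extended_id=False)  # spelled extended_id in older releases
frame = build_can_frame(msg)
assert len(frame) == 16  # 8-byte header + payload padded to 8 bytes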
bitlabstudio/django-libs
django_libs/format_utils.py
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/format_utils.py#L86-L122
def get_format(format_type, lang=None, use_l10n=None): """ For a specific format type, returns the format for the current language (locale), defaults to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT' If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ format_type = str_encode(format_type) if use_l10n or (use_l10n is None and settings.USE_L10N): if lang is None: lang = get_language() cache_key = (format_type, lang) try: cached = _format_cache[cache_key] if cached is not None: return cached else: # Return the general setting by default return getattr(settings, format_type) except KeyError: for module in get_format_modules(lang): try: val = getattr(module, format_type) for iso_input in ISO_INPUT_FORMATS.get(format_type, ()): if iso_input not in val: if isinstance(val, tuple): val = list(val) val.append(iso_input) _format_cache[cache_key] = val return val except AttributeError: pass _format_cache[cache_key] = None return getattr(settings, format_type)
[ "def", "get_format", "(", "format_type", ",", "lang", "=", "None", ",", "use_l10n", "=", "None", ")", ":", "format_type", "=", "str_encode", "(", "format_type", ")", "if", "use_l10n", "or", "(", "use_l10n", "is", "None", "and", "settings", ".", "USE_L10N", ")", ":", "if", "lang", "is", "None", ":", "lang", "=", "get_language", "(", ")", "cache_key", "=", "(", "format_type", ",", "lang", ")", "try", ":", "cached", "=", "_format_cache", "[", "cache_key", "]", "if", "cached", "is", "not", "None", ":", "return", "cached", "else", ":", "# Return the general setting by default", "return", "getattr", "(", "settings", ",", "format_type", ")", "except", "KeyError", ":", "for", "module", "in", "get_format_modules", "(", "lang", ")", ":", "try", ":", "val", "=", "getattr", "(", "module", ",", "format_type", ")", "for", "iso_input", "in", "ISO_INPUT_FORMATS", ".", "get", "(", "format_type", ",", "(", ")", ")", ":", "if", "iso_input", "not", "in", "val", ":", "if", "isinstance", "(", "val", ",", "tuple", ")", ":", "val", "=", "list", "(", "val", ")", "val", ".", "append", "(", "iso_input", ")", "_format_cache", "[", "cache_key", "]", "=", "val", "return", "val", "except", "AttributeError", ":", "pass", "_format_cache", "[", "cache_key", "]", "=", "None", "return", "getattr", "(", "settings", ",", "format_type", ")" ]
For a specific format type, returns the format for the current language (locale), defaults to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT' If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N.
[ "For", "a", "specific", "format", "type", "returns", "the", "format", "for", "the", "current", "language", "(", "locale", ")", "defaults", "to", "the", "format", "in", "the", "settings", ".", "format_type", "is", "the", "name", "of", "the", "format", "e", ".", "g", ".", "DATE_FORMAT" ]
python
train
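Called exactly like `django.utils.formats.get_format`, with the extra explicit language override; the German example value is illustrative.

get_format('DATE_FORMAT', lang='de', use_l10n=True)  # e.g. 'j. F Y'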
thanethomson/statik
statik/utils.py
https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L208-L219
def generate_quickstart(project_path): """Generates all of the basic paths for a Statik project within the given project path. If the project path doesn't exist, it will be created.""" ensure_path_exists(project_path) ensure_file_exists(os.path.join(project_path, "config.yml"), DEFAULT_CONFIG_CONTENT) ensure_path_exists(os.path.join(project_path, 'models')) ensure_path_exists(os.path.join(project_path, 'data')) ensure_path_exists(os.path.join(project_path, 'themes')) ensure_path_exists(os.path.join(project_path, 'templates')) ensure_path_exists(os.path.join(project_path, 'templatetags')) ensure_path_exists(os.path.join(project_path, 'views')) ensure_path_exists(os.path.join(project_path, 'assets'))
[ "def", "generate_quickstart", "(", "project_path", ")", ":", "ensure_path_exists", "(", "project_path", ")", "ensure_file_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "\"config.yml\"", ")", ",", "DEFAULT_CONFIG_CONTENT", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'models'", ")", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'data'", ")", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'themes'", ")", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'templates'", ")", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'templatetags'", ")", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'views'", ")", ")", "ensure_path_exists", "(", "os", ".", "path", ".", "join", "(", "project_path", ",", "'assets'", ")", ")" ]
Generates all of the basic paths for a Statik project within the given project path. If the project path doesn't exist, it will be created.
[ "Generates", "all", "of", "the", "basic", "paths", "for", "a", "Statik", "project", "within", "the", "given", "project", "path", ".", "If", "the", "project", "path", "doesn", "t", "exist", "it", "will", "be", "created", "." ]
python
train
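The helper is safe to re-run, since every path is created only if missing; one call on an arbitrary target path yields the full skeleton:

generate_quickstart('/tmp/mysite')
# /tmp/mysite/
#     config.yml
#     models/  data/  themes/  templates/
#     templatetags/  views/  assets/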