Column schema for the dataset rows below (name: type, observed value lengths):

nwo: string, lengths 5-106
sha: string, length 40
path: string, lengths 4-174
language: string, 1 distinct value ("python")
identifier: string, lengths 1-140
parameters: string, lengths 0-87.7k
argument_list: string, 1 distinct value
return_statement: string, lengths 0-426k
docstring: string, lengths 0-64.3k
docstring_summary: string, lengths 0-26.3k
docstring_tokens: list
function: string, lengths 18-4.83M
function_tokens: list
url: string, lengths 83-304
nwo: django-treebeard/django-treebeard
sha: c46ffa9cd9c6c4e07142958b798dac96218627c6
path: treebeard/al_tree.py
language: python
identifier: AL_Node.get_parent
parameters: (self, update=False)
docstring: :returns: the parent node of the current node object.
function:
    def get_parent(self, update=False):
        """:returns: the parent node of the current node object."""
        if self._meta.proxy_for_model:
            # the current node is a proxy model; the returned parent
            # should be the same proxy model, so we need to explicitly
            # fetch it as an instance of that model rather than simply
            # following the 'parent' relation
            if self.parent_id is None:
                return None
            else:
                return self.__class__.objects.get(pk=self.parent_id)
        else:
            return self.parent
url: https://github.com/django-treebeard/django-treebeard/blob/c46ffa9cd9c6c4e07142958b798dac96218627c6/treebeard/al_tree.py#L112-L124
nwo: sparklingpandas/sparklingpandas
sha: 7d549df4348c979042b683c355aa778fc6d3a768
path: sparklingpandas/dataframe.py
language: python
identifier: DataFrame.to_spark_sql
parameters: (self)
return_statement: return self._schema_rdd
docstring: A Sparkling Pandas specific function to turn a DDF into something that Spark SQL can query. To use the result you will need to call sqlCtx.inferSchema(rdd) and then register the result as a table. Once Spark 1.1 is released this function may be deprecated and replaced with to_spark_sql_schema_rdd.
function:
    def to_spark_sql(self):
        """A Sparkling Pandas specific function to turn a DDF into something
        that Spark SQL can query. To use the result you will need to call
        sqlCtx.inferSchema(rdd) and then register the result as a table.
        Once Spark 1.1 is released this function may be deprecated and
        replaced with to_spark_sql_schema_rdd."""
        return self._schema_rdd
url: https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/dataframe.py#L157-L163
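Per the docstring, the returned RDD still needs schema inference and table registration before Spark SQL can query it. A minimal usage sketch, assuming a Spark 1.x SQLContext named sqlCtx and a Sparkling Pandas DDF named ddf (both names are illustrative):

    rdd = ddf.to_spark_sql()
    schema_rdd = sqlCtx.inferSchema(rdd)    # infer column types from the rows
    schema_rdd.registerTempTable("my_ddf")  # expose the result to SQL
    top = sqlCtx.sql("SELECT * FROM my_ddf LIMIT 10")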
nwo: tp4a/teleport
sha: 1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
path: server/www/packages/packages-windows/x86/pyasn1/codec/ber/decoder.py
language: python
identifier: AnyDecoder.valueDecoder
parameters: (self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options)
return_statement: return self._createComponent(asn1Spec, tagSet, head, **options), tail
function:
    def valueDecoder(self, substrate, asn1Spec,
                     tagSet=None, length=None, state=None,
                     decodeFun=None, substrateFun=None,
                     **options):
        if asn1Spec is None:
            isUntagged = True
        elif asn1Spec.__class__ is tagmap.TagMap:
            isUntagged = tagSet not in asn1Spec.tagMap
        else:
            isUntagged = tagSet != asn1Spec.tagSet

        if isUntagged:
            fullSubstrate = options['fullSubstrate']

            # untagged Any container, recover inner header substrate
            length += len(fullSubstrate) - len(substrate)
            substrate = fullSubstrate

            if LOG:
                LOG('decoding as untagged ANY, substrate %s' % debug.hexdump(substrate))

        if substrateFun:
            return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options),
                                substrate, length)

        head, tail = substrate[:length], substrate[length:]

        return self._createComponent(asn1Spec, tagSet, head, **options), tail
url: https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-windows/x86/pyasn1/codec/ber/decoder.py#L1080-L1109
nwo: theotherp/nzbhydra
sha: 4b03d7f769384b97dfc60dade4806c0fc987514e
path: libs/flask/app.py
language: python
identifier: Flask.log_exception
parameters: (self, exc_info)
docstring: Logs an exception. This is called by :meth:`handle_exception` if debugging is disabled and right before the handler is called. The default implementation logs the exception as error on the :attr:`logger`. .. versionadded:: 0.8
function:
    def log_exception(self, exc_info):
        """Logs an exception.  This is called by :meth:`handle_exception`
        if debugging is disabled and right before the handler is called.
        The default implementation logs the exception as error on the
        :attr:`logger`.

        .. versionadded:: 0.8
        """
        self.logger.error('Exception on %s [%s]' % (
            request.path,
            request.method
        ), exc_info=exc_info)
url: https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/flask/app.py#L1412-L1423
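Since the default implementation only records the path and method, log_exception is a natural override point. A hedged sketch of a Flask subclass that also logs the client address (the subclass name and extra field are illustrative additions, not part of Flask):

    from flask import Flask, request

    class LoggingFlask(Flask):
        def log_exception(self, exc_info):
            # same as the default, plus the remote address
            self.logger.error(
                'Exception on %s [%s] from %s' % (
                    request.path, request.method, request.remote_addr),
                exc_info=exc_info,
            )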
nwo: codebox/markov-text
sha: f753fa6e19d632984e76063ae505197e8c76628b
path: gen.py
language: python
identifier: Generator.__init__
parameters: (self, name, db, rnd)
function:
    def __init__(self, name, db, rnd):
        self.name = name
        self.db = db
        self.rnd = rnd
url: https://github.com/codebox/markov-text/blob/f753fa6e19d632984e76063ae505197e8c76628b/gen.py#L4-L7
nwo: captainhammy/Houdini-Toolbox
sha: a4e61c3c0296b3a3a153a8dd42297c316be1b0f3
path: python/houdini_toolbox/inline/api.py
language: python
identifier: bounding_box_volume
parameters: (bbox: hou.BoundingBox)
return_statement: return _cpp_methods.boundingBoxVolume(bbox)
docstring: Calculate the volume of this bounding box. :param bbox: The bounding box to get the volume of. :return: The volume of the box.
function:
    def bounding_box_volume(bbox: hou.BoundingBox) -> float:
        """Calculate the volume of this bounding box.

        :param bbox: The bounding box to get the volume of.
        :return: The volume of the box.
        """
        return _cpp_methods.boundingBoxVolume(bbox)
url: https://github.com/captainhammy/Houdini-Toolbox/blob/a4e61c3c0296b3a3a153a8dd42297c316be1b0f3/python/houdini_toolbox/inline/api.py#L1962-L1969
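The wrapper defers to a compiled helper, but for an axis-aligned box the volume is simply the product of the three side lengths. A pure-Python sketch of what the helper presumably computes (that equivalence is an assumption; minvec()/maxvec() are hou.BoundingBox accessors):

    def bounding_box_volume_py(bbox):
        # assumed equivalent: volume = dx * dy * dz of the axis-aligned box
        mn, mx = bbox.minvec(), bbox.maxvec()
        return (mx[0] - mn[0]) * (mx[1] - mn[1]) * (mx[2] - mn[2])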
nwo: Project-MONAI/MONAI
sha: 83f8b06372a3803ebe9281300cb794a1f3395018
path: monai/data/grid_dataset.py
language: python
identifier: PatchDataset.__init__
parameters: (self, dataset: Sequence, patch_func: Callable, samples_per_image: int = 1, transform: Optional[Callable] = None)
docstring: Args: dataset: an image dataset to extract patches from. patch_func: converts an input image (item from dataset) into a sequence of image patches. patch_func(dataset[idx]) must return a sequence of patches (length `samples_per_image`). samples_per_image: `patch_func` should return a sequence of `samples_per_image` elements. transform: transform applied to each patch.
function:
    def __init__(
        self, dataset: Sequence, patch_func: Callable, samples_per_image: int = 1, transform: Optional[Callable] = None
    ) -> None:
        """
        Args:
            dataset: an image dataset to extract patches from.
            patch_func: converts an input image (item from dataset) into a sequence of image patches.
                patch_func(dataset[idx]) must return a sequence of patches (length `samples_per_image`).
            samples_per_image: `patch_func` should return a sequence of `samples_per_image` elements.
            transform: transform applied to each patch.
        """
        super().__init__(data=dataset, transform=transform)
        self.patch_func = patch_func
        if samples_per_image <= 0:
            raise ValueError("samples_per_image must be a positive integer.")
        self.samples_per_image = int(samples_per_image)
url: https://github.com/Project-MONAI/MONAI/blob/83f8b06372a3803ebe9281300cb794a1f3395018/monai/data/grid_dataset.py#L209-L225
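A hedged usage sketch for the constructor shown, with a toy patch_func over numpy arrays (the helper and data are illustrative; real pipelines would typically use a MONAI patch sampler):

    import numpy as np
    from monai.data import PatchDataset

    images = [np.arange(16).reshape(4, 4) for _ in range(3)]

    def two_patches(img):
        # toy patch_func: must return samples_per_image patches per item
        return [img[:2], img[2:]]

    ds = PatchDataset(dataset=images, patch_func=two_patches, samples_per_image=2)
    print(len(ds))  # 3 images x 2 patches each = 6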
nwo: pantsbuild/pants
sha: 2e126e78ffc40cb108408316b90e8beebee1df9e
path: src/python/pants/core/goals/generate_lockfiles.py
language: python
identifier: determine_resolves_to_generate
parameters: (all_known_user_resolve_names: Iterable[KnownUserResolveNames], all_tool_sentinels: Iterable[type[ToolLockfileSentinel]], requested_resolve_names: set[str])
return_statement: return requested_user_resolve_names, specified_sentinels
docstring: Apply the `--resolve` option to determine which resolves are specified. Return a tuple of `(user_resolves, tool_lockfile_sentinels)`.
function:
    def determine_resolves_to_generate(
        all_known_user_resolve_names: Iterable[KnownUserResolveNames],
        all_tool_sentinels: Iterable[type[ToolLockfileSentinel]],
        requested_resolve_names: set[str],
    ) -> tuple[list[RequestedUserResolveNames], list[type[ToolLockfileSentinel]]]:
        """Apply the `--resolve` option to determine which resolves are specified.

        Return a tuple of `(user_resolves, tool_lockfile_sentinels)`.
        """
        resolve_names_to_sentinels = {
            sentinel.options_scope: sentinel for sentinel in all_tool_sentinels
        }

        # TODO: check for ambiguity: between tools and user resolves, and across distinct
        #  `KnownUserResolveNames`s. Update AmbiguousResolveNamesError to say where the resolve
        #  name is defined, whereas right now we hardcode it to be the `[python]` option.

        if not requested_resolve_names:
            return [
                known_resolve_names.requested_resolve_names_cls(known_resolve_names.names)
                for known_resolve_names in all_known_user_resolve_names
            ], list(all_tool_sentinels)

        requested_user_resolve_names = []
        for known_resolve_names in all_known_user_resolve_names:
            requested = requested_resolve_names.intersection(known_resolve_names.names)
            if requested:
                requested_resolve_names -= requested
                requested_user_resolve_names.append(
                    known_resolve_names.requested_resolve_names_cls(requested)
                )

        specified_sentinels = []
        for resolve, sentinel in resolve_names_to_sentinels.items():
            if resolve in requested_resolve_names:
                requested_resolve_names.discard(resolve)
                specified_sentinels.append(sentinel)

        if requested_resolve_names:
            raise UnrecognizedResolveNamesError(
                unrecognized_resolve_names=sorted(requested_resolve_names),
                all_valid_names={
                    *itertools.chain.from_iterable(
                        known_resolve_names.names
                        for known_resolve_names in all_known_user_resolve_names
                    ),
                    *resolve_names_to_sentinels.keys(),
                },
                description_of_origin="the option `--generate-lockfiles-resolve`",
            )

        return requested_user_resolve_names, specified_sentinels
url: https://github.com/pantsbuild/pants/blob/2e126e78ffc40cb108408316b90e8beebee1df9e/src/python/pants/core/goals/generate_lockfiles.py#L152-L203
nwo: google/coursebuilder-core
sha: 08f809db3226d9269e30d5edd0edd33bd22041f4
path: coursebuilder/models/courses.py
language: python
identifier: CourseModel13._update_dirty_objects
parameters: (self)
docstring: Update files owned by course.
function:
    def _update_dirty_objects(self):
        """Update files owned by course."""
        fs = self.app_context.fs

        # Update state of owned assessments.
        for unit in self._dirty_units:
            unit = self.find_unit_by_id(unit.unit_id)
            if not unit or verify.UNIT_TYPE_ASSESSMENT != unit.type:
                continue
            filename = self.get_assessment_filename(unit.unit_id)
            path = fs.impl.physical_to_logical(filename)
            if fs.isfile(path):
                self.set_file_content(
                    filename, None, metadata_only=True,
                    is_draft=not self.is_unit_available(unit))

        # Update state of owned activities.
        for lesson in self._dirty_lessons:
            lesson = self.find_lesson_by_id(None, lesson.lesson_id)
            if not lesson or not lesson.has_activity:
                continue
            path = fs.impl.physical_to_logical(
                self.get_activity_filename(None, lesson.lesson_id))
            if fs.isfile(path):
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not self.is_lesson_available(None, lesson))
url: https://github.com/google/coursebuilder-core/blob/08f809db3226d9269e30d5edd0edd33bd22041f4/coursebuilder/models/courses.py#L1438-L1465
nwo: ansible-collections/community.general
sha: 3faffe8f47968a2400ba3c896c8901c03001a194
path: plugins/modules/ldap_attrs.py
language: python
identifier: LdapAttrs._order_values
parameters: (self, values)
return_statement: return ordered_values
docstring: Prepend X-ORDERED index numbers to attribute's values.
function:
    def _order_values(self, values):
        """ Prepend X-ORDERED index numbers to attribute's values. """
        ordered_values = []

        if isinstance(values, list):
            for index, value in enumerate(values):
                cleaned_value = re.sub(r'^\{\d+\}', '', value)
                ordered_values.append('{' + str(index) + '}' + cleaned_value)

        return ordered_values
url: https://github.com/ansible-collections/community.general/blob/3faffe8f47968a2400ba3c896c8901c03001a194/plugins/modules/ldap_attrs.py#L192-L201
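The X-ORDERED extension prefixes each value with its position, e.g. {0}, and the helper strips any existing prefix first so renumbering is idempotent. A standalone sketch of the same transformation (the function name and sample values are illustrative):

    import re

    def order_values(values):
        ordered = []
        if isinstance(values, list):
            for index, value in enumerate(values):
                cleaned = re.sub(r'^\{\d+\}', '', value)
                ordered.append('{' + str(index) + '}' + cleaned)
        return ordered

    print(order_values(['{9}olcSuffix: dc=example,dc=com', 'olcReadOnly: FALSE']))
    # ['{0}olcSuffix: dc=example,dc=com', '{1}olcReadOnly: FALSE']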
nwo: fabioz/PyDev.Debugger
sha: 0f8c02a010fe5690405da1dd30ed72326191ce63
path: pydevd_attach_to_process/winappdbg/event.py
language: python
identifier: EventHandler.__hook_dll
parameters: (self, event)
docstring: Hook the requested API calls (in self.apiHooks). This method is called automatically whenever a DLL is loaded.
function:
    def __hook_dll(self, event):
        """
        Hook the requested API calls (in self.apiHooks).

        This method is called automatically whenever a DLL is loaded.
        """
        debug = event.debug
        pid = event.get_pid()
        for hook_api_stub in self.__get_hooks_for_dll(event):
            hook_api_stub.hook(debug, pid)
url: https://github.com/fabioz/PyDev.Debugger/blob/0f8c02a010fe5690405da1dd30ed72326191ce63/pydevd_attach_to_process/winappdbg/event.py#L1429-L1438
nwo: beetbox/beets
sha: 2fea53c34dd505ba391cb345424e0613901c8025
path: beetsplug/duplicates.py
language: python
identifier: DuplicatesPlugin._merge
parameters: (self, objs)
return_statement: return objs
docstring: Merge duplicate items. See ``_merge_items`` and ``_merge_albums`` for the relevant strategies.
function:
    def _merge(self, objs):
        """Merge duplicate items. See ``_merge_items`` and
        ``_merge_albums`` for the relevant strategies.
        """
        kind = Item if all(isinstance(o, Item) for o in objs) else Album
        if kind is Item:
            objs = self._merge_items(objs)
        else:
            objs = self._merge_albums(objs)
        return objs
url: https://github.com/beetbox/beets/blob/2fea53c34dd505ba391cb345424e0613901c8025/beetsplug/duplicates.py#L321-L330
nwo: zhl2008/awd-platform
sha: 0416b31abea29743387b10b3914581fbe8e7da5e
path: web_hxb2/lib/python3.5/site-packages/wagtail_bak/wagtailsearch/backends/elasticsearch.py
language: python
identifier: ElasticsearchIndex.add_items
parameters: (self, model, items)
function:
    def add_items(self, model, items):
        if not class_is_indexed(model):
            return

        # Get mapping
        mapping = self.mapping_class(model)
        doc_type = mapping.get_document_type()

        # Create list of actions
        actions = []
        for item in items:
            # Create the action
            action = {
                '_index': self.name,
                '_type': doc_type,
                '_id': mapping.get_document_id(item),
            }
            action.update(mapping.get_document(item))
            actions.append(action)

        # Run the actions
        bulk(self.es, actions)
url: https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/wagtail_bak/wagtailsearch/backends/elasticsearch.py#L559-L580
nwo: pm4py/pm4py-core
sha: 7807b09a088b02199cd0149d724d0e28793971bf
path: pm4py/visualization/dfg/visualizer.py
language: python
identifier: save
parameters: (gviz, output_file_path)
docstring: Save the diagram. Parameters: gviz (GraphViz diagram), output_file_path (path where the GraphViz output should be saved).
function:
    def save(gviz, output_file_path):
        """
        Save the diagram

        Parameters
        -----------
        gviz
            GraphViz diagram
        output_file_path
            Path where the GraphViz output should be saved
        """
        gsave.save(gviz, output_file_path)
url: https://github.com/pm4py/pm4py-core/blob/7807b09a088b02199cd0149d724d0e28793971bf/pm4py/visualization/dfg/visualizer.py#L69-L80
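A hedged usage sketch: gviz would come from the same visualizer module's apply() on a previously discovered DFG (the dfg variable and output name are illustrative):

    from pm4py.visualization.dfg import visualizer as dfg_visualizer

    # assuming `dfg` was discovered earlier with pm4py's DFG discovery
    gviz = dfg_visualizer.apply(dfg)
    dfg_visualizer.save(gviz, "dfg.png")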
nwo: zhl2008/awd-platform
sha: 0416b31abea29743387b10b3914581fbe8e7da5e
path: web_flaskbb/lib/python2.7/site-packages/sqlalchemy/orm/interfaces.py
language: python
identifier: StrategizedProperty.post_instrument_class
parameters: (self, mapper)
function:
    def post_instrument_class(self, mapper):
        if not self.parent.non_primary and \
                not mapper.class_manager._attr_has_impl(self.key):
            self.strategy.init_class_attribute(mapper)
url: https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/orm/interfaces.py#L541-L544
nwo: sagemath/sage
sha: f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
path: src/sage/combinat/rigged_configurations/kr_tableaux.py
language: python
identifier: KRTableauxTypeFromRCElement.phi
parameters: (self, i)
return_statement: return TensorProductOfRegularCrystalsElement.phi(self, i)
docstring: Compute `\varphi_i` of ``self``. .. TODO:: Compute `\phi_0` without moving to KR crystals.
function:
    def phi(self, i):
        r"""
        Compute `\varphi_i` of ``self``.

        .. TODO::

            Compute `\phi_0` without moving to KR crystals.

        EXAMPLES::

            sage: KRT = crystals.KirillovReshetikhin(['D',4,3], 2, 2, model='KR')
            sage: KRT.module_generators[0].phi(0)
            0
        """
        if i == self.parent().cartan_type().special_node():
            P = self.parent()
            from sage.combinat.rigged_configurations.tensor_product_kr_tableaux import TensorProductOfKirillovReshetikhinTableaux
            K = TensorProductOfKirillovReshetikhinTableaux(P.cartan_type(), [[2, P.s()]])
            rc = K(self).to_rigged_configuration().to_virtual_configuration()
            return rc.phi(0)
        return TensorProductOfRegularCrystalsElement.phi(self, i)
url: https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/rigged_configurations/kr_tableaux.py#L1756-L1776
nwo: spesmilo/electrum
sha: bdbd59300fbd35b01605e66145458e5f396108e8
path: electrum/gui/qt/installwizard.py
language: python
identifier: InstallWizard.show_seed_dialog
parameters: (self, run_next, seed_text)
return_statement: return slayout.is_ext
function:
    def show_seed_dialog(self, run_next, seed_text):
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(
            seed=seed_text,
            title=title,
            msg=True,
            options=['ext'],
            config=self.config,
        )
        self.exec_layout(slayout)
        return slayout.is_ext
url: https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/gui/qt/installwizard.py#L516-L526
nwo: OpenMDAO/OpenMDAO
sha: f47eb5485a0bb5ea5d2ae5bd6da4b94dc6b296bd
path: openmdao/components/spline_comp.py
language: python
identifier: SplineComp.setup
parameters: (self)
docstring: Perform some final setup and checks.
function:
    def setup(self):
        """
        Perform some final setup and checks.
        """
        interp_method = self.options['method']
        x_cp_val = self.options['x_cp_val']
        n_cp = self.options['num_cp']
        if x_cp_val is not None:
            if interp_method == 'bsplines':
                msg = "{}: 'x_cp_val' is not a valid option when using method 'bsplines'. "
                msg += "Set 'num_cp' instead."
                raise ValueError(msg.format(self.msginfo))

            if n_cp is not None:
                msg = "{}: It is not valid to set both options 'x_cp_val' and 'num_cp'."
                raise ValueError(msg.format(self.msginfo))

            grid = np.asarray(x_cp_val)
            n_cp = len(grid)

        elif n_cp is not None:
            grid = np.linspace(0, 1.0, n_cp)

        else:
            msg = "{}: Either option 'x_cp_val' or 'num_cp' must be set."
            raise ValueError(msg.format(self.msginfo))

        self._n_cp = n_cp

        opts = {}
        if 'interp_options' in self.options:
            opts = self.options['interp_options']

        vec_size = self.options['vec_size']
        n_interp = len(self.options['x_interp_val'])

        for y_cp_name, y_interp_name, y_cp_val, y_units in self._spline_cache:
            self.add_output(y_interp_name, np.ones((vec_size, n_interp)), units=y_units)

            if y_cp_val is None:
                y_cp_val = np.ones((vec_size, n_cp))

            elif len(y_cp_val.shape) < 2:
                y_cp_val = y_cp_val.reshape((vec_size, n_cp))

            self.add_input(name=y_cp_name, val=y_cp_val, units=y_units)

            self.interp_to_cp[y_interp_name] = y_cp_name

            row = np.repeat(np.arange(n_interp), n_cp)
            col = np.tile(np.arange(n_cp), n_interp)
            rows = np.tile(row, vec_size) + \
                np.repeat(n_interp * np.arange(vec_size), n_interp * n_cp)
            cols = np.tile(col, vec_size) + np.repeat(n_cp * np.arange(vec_size), n_interp * n_cp)
            self.declare_partials(y_interp_name, y_cp_name, rows=rows, cols=cols)

            # Separate data for each vec_size, but we only need to do sizing, so just pass
            # in the first. Most interps aren't vectorized.
            cp_val = y_cp_val[0, :]

            self.interps[y_interp_name] = InterpND(points=(grid, ), values=cp_val,
                                                   method=interp_method,
                                                   x_interp=self.options['x_interp_val'],
                                                   extrapolate=True, **opts)

        # The scipy methods do not support complex step.
        if self.options['method'].startswith('scipy'):
            self.set_check_partial_options('*', method='fd')
url: https://github.com/OpenMDAO/OpenMDAO/blob/f47eb5485a0bb5ea5d2ae5bd6da4b94dc6b296bd/openmdao/components/spline_comp.py#L83-L153
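A hedged usage sketch of the SplineComp this setup() belongs to, exercising the x_cp_val/x_interp_val options it validates (the values and subsystem name are illustrative):

    import numpy as np
    import openmdao.api as om

    x_cp = np.linspace(0.0, 1.0, 6)        # control-point locations
    x_interp = np.linspace(0.0, 1.0, 20)   # evaluation grid

    prob = om.Problem()
    comp = om.SplineComp(method='akima', x_cp_val=x_cp, x_interp_val=x_interp)
    comp.add_spline(y_cp_name='ycp', y_interp_name='y_val', y_cp_val=np.sin(x_cp))
    prob.model.add_subsystem('spline', comp)
    prob.setup()
    prob.run_model()
    print(prob.get_val('spline.y_val').shape)  # (1, 20): vec_size x n_interp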
nwo: dask/dask
sha: c2b962fec1ba45440fe928869dc64cfe9cc36506
path: dask/local.py
language: python
identifier: finish_task
parameters: (dsk, key, state, results, sortkey, delete=True, release_data=release_data)
return_statement: return state
docstring: Update execution state after a task finishes. Mutates. This should run atomically (with a lock).
function:
    def finish_task(
        dsk, key, state, results, sortkey, delete=True, release_data=release_data
    ):
        """
        Update execution state after a task finishes

        Mutates.  This should run atomically (with a lock).
        """
        for dep in sorted(state["dependents"][key], key=sortkey, reverse=True):
            s = state["waiting"][dep]
            s.remove(key)
            if not s:
                del state["waiting"][dep]
                state["ready"].append(dep)

        for dep in state["dependencies"][key]:
            if dep in state["waiting_data"]:
                s = state["waiting_data"][dep]
                s.remove(key)
                if not s and dep not in results:
                    release_data(dep, state, delete=delete)
            elif delete and dep not in results:
                release_data(dep, state, delete=delete)

        state["finished"].add(key)
        state["running"].remove(key)

        return state
url: https://github.com/dask/dask/blob/c2b962fec1ba45440fe928869dc64cfe9cc36506/dask/local.py#L254-L281
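The state argument is the local scheduler's bookkeeping dict. A hedged sketch of building one to see the structures finish_task mutates (start_state_from_dask lives alongside this function in dask.local; its availability in any given dask version is an assumption):

    from dask.local import start_state_from_dask

    dsk = {"x": 1, "y": (lambda v: v + 1, "x")}
    state = start_state_from_dask(dsk)
    # sets/dicts such as 'waiting', 'waiting_data', 'ready', 'running', 'finished'
    print(sorted(state))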
nwo: statsmodels/statsmodels
sha: debbe7ea6ba28fe5bdb78f09f8cac694bef98722
path: statsmodels/nonparametric/bandwidths.py
language: python
identifier: select_bandwidth
parameters: (x, bw, kernel)
docstring: Selects bandwidth for a selection rule bw. This is a wrapper around existing bandwidth selection rules. Parameters: x (array_like, array for which to get the bandwidth), bw (str, name of bandwidth selection rule, currently supported are: %s), kernel (not used yet). Returns: bw (float, the estimate of the bandwidth).
function:
    def select_bandwidth(x, bw, kernel):
        """
        Selects bandwidth for a selection rule bw

        this is a wrapper around existing bandwidth selection rules

        Parameters
        ----------
        x : array_like
            Array for which to get the bandwidth
        bw : str
            name of bandwidth selection rule, currently supported are:
            %s
        kernel : not used yet

        Returns
        -------
        bw : float
            The estimate of the bandwidth
        """
        bw = bw.lower()
        if bw not in bandwidth_funcs:
            raise ValueError("Bandwidth %s not understood" % bw)
        bandwidth = bandwidth_funcs[bw](x, kernel)
        if np.any(bandwidth == 0):
            # eventually this can fall back on another selection criterion.
            err = "Selected KDE bandwidth is 0. Cannot estimate density. " \
                  "Either provide the bandwidth during initialization or use " \
                  "an alternative method."
            raise RuntimeError(err)
        else:
            return bandwidth
url: https://github.com/statsmodels/statsmodels/blob/debbe7ea6ba28fe5bdb78f09f8cac694bef98722/statsmodels/nonparametric/bandwidths.py#L153-L184
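A hedged usage sketch; 'silverman' is one of the rules registered in bandwidth_funcs, and kernel is unused per the docstring:

    import numpy as np
    from statsmodels.nonparametric.bandwidths import select_bandwidth

    x = np.random.default_rng(0).normal(size=500)
    bw = select_bandwidth(x, bw="silverman", kernel=None)
    print(bw)  # a positive float; a zero bandwidth raises RuntimeError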
nwo: SpotlightKid/python-rtmidi
sha: 8231a95c9b9c43e3183cf5ef8d28218ef3ae7e83
path: examples/advanced/midioutwrapper.py
language: python
identifier: MidiOutWrapper.send_nrpn
parameters: (self, param=0, value=0, ch=None)
docstring: Send a Non-Registered Parameter Number (NRPN) Change via a series of CC messages.
function:
    def send_nrpn(self, param=0, value=0, ch=None):
        """Send a Non-Registered Parameter Number (NRPN) Change via a series of CC messages."""
        if isinstance(param, int):
            param_msb = (param >> 7)
            param_lsb = param
        else:
            param_msb, param_lsb = param

        if param_msb is not None:
            self.send_control_change(NRPN_MSB, param_msb, ch=ch)
        if param_lsb is not None:
            self.send_control_change(NRPN_LSB, param_lsb, ch=ch)

        if isinstance(value, int):
            value_msb = (value >> 7)
            value_lsb = value
        else:
            value_msb, value_lsb = value

        if value_msb is not None:
            self.send_control_change(DATA_ENTRY_MSB, value_msb, ch=ch)
        if value_lsb is not None:
            self.send_control_change(DATA_ENTRY_LSB, value_lsb, ch=ch)
url: https://github.com/SpotlightKid/python-rtmidi/blob/8231a95c9b9c43e3183cf5ef8d28218ef3ae7e83/examples/advanced/midioutwrapper.py#L187-L211
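NRPN parameter numbers and values are 14-bit quantities sent as MSB/LSB pairs of 7-bit data bytes. A standalone sketch of the split (this sketch masks the LSB with 0x7F; the wrapper above passes the low part through unmasked):

    param = 1000                         # 14-bit NRPN parameter number
    msb, lsb = param >> 7, param & 0x7F  # 7, 104
    assert (msb << 7) | lsb == param
    print(msb, lsb)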
nwo: wistbean/learn_python3_spider
sha: 73c873f4845f4385f097e5057407d03dd37a117b
path: stackoverflow/venv/lib/python3.6/site-packages/OpenSSL/crypto.py
language: python
identifier: PKCS12.set_friendlyname
parameters: (self, name)
docstring: Set the friendly name in the PKCS #12 structure. :param name: The new friendly name, or :py:const:`None` to unset. :type name: :py:class:`bytes` or :py:const:`None` :return: ``None``
function:
    def set_friendlyname(self, name):
        """
        Set the friendly name in the PKCS #12 structure.

        :param name: The new friendly name, or :py:const:`None` to unset.
        :type name: :py:class:`bytes` or :py:const:`None`

        :return: ``None``
        """
        if name is None:
            self._friendlyname = None
        elif not isinstance(name, bytes):
            raise TypeError(
                "name must be a byte string or None (not %r)" % (name,)
            )
        self._friendlyname = name
url: https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/OpenSSL/crypto.py#L2486-L2501
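A hedged usage sketch against the (since-deprecated) OpenSSL.crypto.PKCS12 API:

    from OpenSSL import crypto

    p12 = crypto.PKCS12()
    p12.set_friendlyname(b"backup key")
    print(p12.get_friendlyname())  # b'backup key'
    p12.set_friendlyname(None)     # unset it again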
nwo: jliljebl/flowblade
sha: 995313a509b80e99eb1ad550d945bdda5995093b
path: flowblade-trunk/Flowblade/tlinewidgets.py
language: python
identifier: TimeLineCanvas.get_pointer_context
parameters: (self, x, y)
return_statement: return appconsts.POINTER_CONTEXT_NONE
function:
    def get_pointer_context(self, x, y):
        frame = get_frame(x)
        hit_compositor = compositor_hit(frame, x, y, current_sequence().compositors)
        if hit_compositor != None:
            if editorstate.get_compositing_mode() == appconsts.COMPOSITING_MODE_STANDARD_AUTO_FOLLOW:
                return appconsts.POINTER_CONTEXT_NONE
            if editorstate.auto_follow_active() == False or (editorstate.auto_follow_active() == True and hit_compositor.obey_autofollow == False):
                return compositormodes.get_pointer_context(hit_compositor, x)
            else:
                return appconsts.POINTER_CONTEXT_NONE

        track = get_track(y)
        if track == None:
            return appconsts.POINTER_CONTEXT_NONE
        clip_index = current_sequence().get_clip_index(track, frame)
        if clip_index == -1:
            # frame after last clip on track
            return appconsts.POINTER_CONTEXT_NONE
        try:
            clip = track.clips[clip_index]
        except:
            return appconsts.POINTER_CONTEXT_NONE  # We probably should not hit this

        clip_start_frame = track.clip_start(clip_index)
        clip_end_frame = track.clip_start(clip_index + 1)

        # INSERT, OVERWRITE
        if (EDIT_MODE() == editorstate.INSERT_MOVE or EDIT_MODE() == editorstate.OVERWRITE_MOVE) and editorstate.overwrite_mode_box == False:
            if abs(x - _get_frame_x(clip_start_frame)) < DRAG_SENSITIVITY_AREA_WIDTH_PIX:
                return appconsts.POINTER_CONTEXT_END_DRAG_LEFT
            if abs(x - _get_frame_x(clip_end_frame)) < DRAG_SENSITIVITY_AREA_WIDTH_PIX:
                return appconsts.POINTER_CONTEXT_END_DRAG_RIGHT
            return appconsts.POINTER_CONTEXT_NONE
        # TRIM
        elif EDIT_MODE() == editorstate.ONE_ROLL_TRIM or EDIT_MODE() == editorstate.ONE_ROLL_TRIM_NO_EDIT:
            if abs(frame - clip_start_frame) < abs(frame - clip_end_frame):
                if clip.is_blanck_clip == True:
                    return appconsts.POINTER_CONTEXT_NONE
                return appconsts.POINTER_CONTEXT_TRIM_LEFT
            else:
                if clip.is_blanck_clip == True:
                    return appconsts.POINTER_CONTEXT_NONE
                return appconsts.POINTER_CONTEXT_TRIM_RIGHT
        # BOX
        elif (EDIT_MODE() == editorstate.OVERWRITE_MOVE and editorstate.overwrite_mode_box == True
              and boxmove.box_selection_data != None):
            if boxmove.box_selection_data.is_hit(x, y):
                return appconsts.POINTER_CONTEXT_BOX_SIDEWAYS
        # MULTI TRIM
        elif EDIT_MODE() == editorstate.MULTI_TRIM:
            editorstate.set_mouse_current_non_drag_pos(x, y)
            clip_start_frame_x = _get_frame_x(clip_start_frame)
            clip_end_frame_x = _get_frame_x(clip_end_frame)
            clip_center_x = (clip_end_frame_x - clip_start_frame_x) / 2 + clip_start_frame_x
            if abs(x - clip_start_frame_x) < MULTI_TRIM_ROLL_SENSITIVITY_AREA_WIDTH_PIX + 4:
                # +4, somehow we were getting non-symmetrical areas of sensitivity on different
                # sides of cut, so this was added as quick'n'dirty fix without finding out the root cause.
                return appconsts.POINTER_CONTEXT_MULTI_ROLL
            elif abs(x - clip_end_frame_x) < MULTI_TRIM_ROLL_SENSITIVITY_AREA_WIDTH_PIX:
                return appconsts.POINTER_CONTEXT_MULTI_ROLL
            elif abs(x - clip_center_x) < MULTI_TRIM_SLIP_SENSITIVITY_AREA_WIDTH_PIX:
                if clip.is_blanck_clip == True:
                    return appconsts.POINTER_CONTEXT_NONE
                return appconsts.POINTER_CONTEXT_MULTI_SLIP
            elif abs(frame - clip_start_frame) < abs(frame - clip_end_frame):
                if clip.is_blanck_clip == True:
                    return appconsts.POINTER_CONTEXT_NONE
                return appconsts.POINTER_CONTEXT_TRIM_LEFT
            else:
                if clip.is_blanck_clip == True:
                    return appconsts.POINTER_CONTEXT_NONE
                return appconsts.POINTER_CONTEXT_TRIM_RIGHT

        return appconsts.POINTER_CONTEXT_NONE
url: https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/tlinewidgets.py#L1566-L1640
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/compute/manager.py
python
ComputeManager._post_live_migration_remove_source_vol_connections
( self, context, instance, source_bdms)
Disconnect volume connections from the source host during _post_live_migration. :param context: nova auth RequestContext :param instance: Instance object being live migrated :param source_bdms: BlockDeviceMappingList representing the attached volumes with connection_info set for the source host
Disconnect volume connections from the source host during _post_live_migration.
[ "Disconnect", "volume", "connections", "from", "the", "source", "host", "during", "_post_live_migration", "." ]
def _post_live_migration_remove_source_vol_connections( self, context, instance, source_bdms): """Disconnect volume connections from the source host during _post_live_migration. :param context: nova auth RequestContext :param instance: Instance object being live migrated :param source_bdms: BlockDeviceMappingList representing the attached volumes with connection_info set for the source host """ # Detaching volumes. connector = self.driver.get_volume_connector(instance) for bdm in source_bdms: if bdm.is_volume: # Detaching volumes is a call to an external API that can fail. # If it does, we need to handle it gracefully so that the call # to post_live_migration_at_destination - where we set instance # host and task state - still happens. We need to rethink the # current approach of setting instance host and task state # AFTER a whole bunch of things that could fail in unhandled # ways, but that is left as a TODO(artom). try: if bdm.attachment_id is None: # Prior to cinder v3.44: # We don't want to actually mark the volume detached, # or delete the bdm, just remove the connection from # this host. # # remove the volume connection without detaching from # hypervisor because the instance is not running # anymore on the current host self.volume_api.terminate_connection(context, bdm.volume_id, connector) else: # cinder v3.44 api flow - delete the old attachment # for the source host self.volume_api.attachment_delete(context, bdm.attachment_id) except Exception as e: if bdm.attachment_id is None: LOG.error('Connection for volume %s not terminated on ' 'source host %s during post_live_migration: ' '%s', bdm.volume_id, self.host, str(e), instance=instance) else: LOG.error('Volume attachment %s not deleted on source ' 'host %s during post_live_migration: %s', bdm.attachment_id, self.host, str(e), instance=instance)
[ "def", "_post_live_migration_remove_source_vol_connections", "(", "self", ",", "context", ",", "instance", ",", "source_bdms", ")", ":", "# Detaching volumes.", "connector", "=", "self", ".", "driver", ".", "get_volume_connector", "(", "instance", ")", "for", "bdm", "in", "source_bdms", ":", "if", "bdm", ".", "is_volume", ":", "# Detaching volumes is a call to an external API that can fail.", "# If it does, we need to handle it gracefully so that the call", "# to post_live_migration_at_destination - where we set instance", "# host and task state - still happens. We need to rethink the", "# current approach of setting instance host and task state", "# AFTER a whole bunch of things that could fail in unhandled", "# ways, but that is left as a TODO(artom).", "try", ":", "if", "bdm", ".", "attachment_id", "is", "None", ":", "# Prior to cinder v3.44:", "# We don't want to actually mark the volume detached,", "# or delete the bdm, just remove the connection from", "# this host.", "#", "# remove the volume connection without detaching from", "# hypervisor because the instance is not running", "# anymore on the current host", "self", ".", "volume_api", ".", "terminate_connection", "(", "context", ",", "bdm", ".", "volume_id", ",", "connector", ")", "else", ":", "# cinder v3.44 api flow - delete the old attachment", "# for the source host", "self", ".", "volume_api", ".", "attachment_delete", "(", "context", ",", "bdm", ".", "attachment_id", ")", "except", "Exception", "as", "e", ":", "if", "bdm", ".", "attachment_id", "is", "None", ":", "LOG", ".", "error", "(", "'Connection for volume %s not terminated on '", "'source host %s during post_live_migration: '", "'%s'", ",", "bdm", ".", "volume_id", ",", "self", ".", "host", ",", "str", "(", "e", ")", ",", "instance", "=", "instance", ")", "else", ":", "LOG", ".", "error", "(", "'Volume attachment %s not deleted on source '", "'host %s during post_live_migration: %s'", ",", "bdm", ".", "attachment_id", ",", "self", ".", "host", ",", "str", "(", "e", ")", ",", "instance", "=", "instance", ")" ]
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/compute/manager.py#L8709-L8759
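The per-volume try/except above deliberately logs and continues, so one failed Cinder call cannot prevent the later host/task-state update. A self-contained sketch of that "log, don't raise" loop with a simulated failing call (all names here are illustrative, not nova's):
import logging

logging.basicConfig()
LOG = logging.getLogger(__name__)

def detach_on_source(volume_id):
    # stand-in for an external API call that can fail
    if volume_id == 'vol-bad':
        raise RuntimeError('backend unavailable')

for volume_id in ['vol-a', 'vol-bad', 'vol-b']:
    try:
        detach_on_source(volume_id)
    except Exception as exc:
        # record the failure and keep going, mirroring the loop above
        LOG.error('detach failed for %s: %s', volume_id, exc)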
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Base/Scripts/CommonServerPython/CommonServerPython.py
python
return_outputs
(readable_output, outputs=None, raw_response=None, timeline=None, ignore_auto_extract=False)
DEPRECATED: use return_results() instead This function wraps demisto.results(), making it more intuitive to return results to the user. :type readable_output: ``str`` | ``int`` :param readable_output: markdown string that will be presented in the warroom, should be human readable - (HumanReadable) :type outputs: ``dict`` :param outputs: the outputs that will be returned to playbook/investigation context (originally EntryContext) :type raw_response: ``dict`` | ``list`` | ``str`` :param raw_response: must be a dictionary, if not provided then will be equal to outputs. usually must be the original raw response from the 3rd party service (originally Contents) :type timeline: ``dict`` | ``list`` :param timeline: expects a list, if a dict is passed it will be put into a list. used by server to populate an indicator's timeline. if the 'Category' field is not present in the timeline dict(s), it will automatically be added to the dict(s) with its value set to 'Integration Update'. :type ignore_auto_extract: ``bool`` :param ignore_auto_extract: expects a bool value. if true then the warroom entry readable_output will not be auto enriched. :return: None :rtype: ``None``
DEPRECATED: use return_results() instead
[ "DEPRECATED", ":", "use", "return_results", "()", "instead" ]
def return_outputs(readable_output, outputs=None, raw_response=None, timeline=None, ignore_auto_extract=False): """ DEPRECATED: use return_results() instead This function wraps demisto.results(), making it more intuitive to return results to the user. :type readable_output: ``str`` | ``int`` :param readable_output: markdown string that will be presented in the warroom, should be human readable - (HumanReadable) :type outputs: ``dict`` :param outputs: the outputs that will be returned to playbook/investigation context (originally EntryContext) :type raw_response: ``dict`` | ``list`` | ``str`` :param raw_response: must be a dictionary, if not provided then will be equal to outputs. usually must be the original raw response from the 3rd party service (originally Contents) :type timeline: ``dict`` | ``list`` :param timeline: expects a list, if a dict is passed it will be put into a list. used by server to populate an indicator's timeline. if the 'Category' field is not present in the timeline dict(s), it will automatically be added to the dict(s) with its value set to 'Integration Update'. :type ignore_auto_extract: ``bool`` :param ignore_auto_extract: expects a bool value. if true then the warroom entry readable_output will not be auto enriched. :return: None :rtype: ``None`` """ timeline_list = [timeline] if isinstance(timeline, dict) else timeline if timeline_list: for tl_obj in timeline_list: if 'Category' not in tl_obj.keys(): tl_obj['Category'] = 'Integration Update' return_entry = { "Type": entryTypes["note"], "HumanReadable": readable_output, "ContentsFormat": formats["text"] if isinstance(raw_response, STRING_TYPES) else formats['json'], "Contents": raw_response, "EntryContext": outputs, 'IgnoreAutoExtract': ignore_auto_extract, "IndicatorTimeline": timeline_list } # Return 'readable_output' only if needed if readable_output and not outputs and not raw_response: return_entry["Contents"] = readable_output return_entry["ContentsFormat"] = formats["text"] elif outputs and raw_response is None: # if raw_response was not provided but outputs were provided then set Contents as outputs return_entry["Contents"] = outputs demisto.results(return_entry)
[ "def", "return_outputs", "(", "readable_output", ",", "outputs", "=", "None", ",", "raw_response", "=", "None", ",", "timeline", "=", "None", ",", "ignore_auto_extract", "=", "False", ")", ":", "timeline_list", "=", "[", "timeline", "]", "if", "isinstance", "(", "timeline", ",", "dict", ")", "else", "timeline", "if", "timeline_list", ":", "for", "tl_obj", "in", "timeline_list", ":", "if", "'Category'", "not", "in", "tl_obj", ".", "keys", "(", ")", ":", "tl_obj", "[", "'Category'", "]", "=", "'Integration Update'", "return_entry", "=", "{", "\"Type\"", ":", "entryTypes", "[", "\"note\"", "]", ",", "\"HumanReadable\"", ":", "readable_output", ",", "\"ContentsFormat\"", ":", "formats", "[", "\"text\"", "]", "if", "isinstance", "(", "raw_response", ",", "STRING_TYPES", ")", "else", "formats", "[", "'json'", "]", ",", "\"Contents\"", ":", "raw_response", ",", "\"EntryContext\"", ":", "outputs", ",", "'IgnoreAutoExtract'", ":", "ignore_auto_extract", ",", "\"IndicatorTimeline\"", ":", "timeline_list", "}", "# Return 'readable_output' only if needed", "if", "readable_output", "and", "not", "outputs", "and", "not", "raw_response", ":", "return_entry", "[", "\"Contents\"", "]", "=", "readable_output", "return_entry", "[", "\"ContentsFormat\"", "]", "=", "formats", "[", "\"text\"", "]", "elif", "outputs", "and", "raw_response", "is", "None", ":", "# if raw_response was not provided but outputs were provided then set Contents as outputs", "return_entry", "[", "\"Contents\"", "]", "=", "outputs", "demisto", ".", "results", "(", "return_entry", ")" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Base/Scripts/CommonServerPython/CommonServerPython.py#L6222-L6272
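A minimal usage sketch for the deprecated helper above, assuming it runs inside an XSOAR/Demisto integration where the demisto module and CommonServerPython helpers (such as tableToMarkdown) are supplied by the platform; the ticket fields and context key are illustrative:
ticket = {'ID': '1234', 'Status': 'open'}
return_outputs(
    readable_output=tableToMarkdown('Ticket', ticket),   # human-readable war-room entry
    outputs={'Ticket(val.ID == obj.ID)': ticket},        # written to the playbook context
    raw_response=ticket,                                 # kept as the raw Contents
)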
pyinvoke/invoke
45dc9d03639dac5b6d1445831bf270e686ef88b4
invoke/collection.py
python
Collection.__init__
(self, *args, **kwargs)
Create a new task collection/namespace. `.Collection` offers a set of methods for building a collection of tasks from scratch, plus a convenient constructor wrapping said API. In either case: * The first positional argument may be a string, which (if given) is used as the collection's default name when performing namespace lookups; * A ``loaded_from`` keyword argument may be given, which sets metadata indicating the filesystem path the collection was loaded from. This is used as a guide when loading per-project :ref:`configuration files <config-hierarchy>`. * An ``auto_dash_names`` kwarg may be given, controlling whether task and collection names have underscores turned to dashes in most cases; it defaults to ``True`` but may be set to ``False`` to disable. The CLI machinery will pass in the value of the ``tasks.auto_dash_names`` config value to this kwarg. **The method approach** May initialize with no arguments and use methods (e.g. `.add_task`/`.add_collection`) to insert objects:: c = Collection() c.add_task(some_task) If an initial string argument is given, it is used as the default name for this collection, should it be inserted into another collection as a sub-namespace:: docs = Collection('docs') docs.add_task(doc_task) ns = Collection() ns.add_task(top_level_task) ns.add_collection(docs) # Valid identifiers are now 'top_level_task' and 'docs.doc_task' # (assuming the task objects were actually named the same as the # variables we're using :)) For details, see the API docs for the rest of the class. **The constructor approach** All ``*args`` given to `.Collection` (besides the abovementioned optional positional 'name' argument and ``loaded_from`` kwarg) are expected to be `.Task` or `.Collection` instances which will be passed to `.add_task`/`.add_collection` as appropriate. Module objects are also valid (as they are for `.add_collection`). For example, the below snippet results in the same two task identifiers as the one above:: ns = Collection(top_level_task, Collection('docs', doc_task)) If any ``**kwargs`` are given, the keywords are used as the initial name arguments for the respective values:: ns = Collection( top_level_task=some_other_task, docs=Collection(doc_task) ) That's exactly equivalent to:: docs = Collection(doc_task) ns = Collection() ns.add_task(some_other_task, 'top_level_task') ns.add_collection(docs, 'docs') See individual methods' API docs for details.
Create a new task collection/namespace.
[ "Create", "a", "new", "task", "collection", "/", "namespace", "." ]
def __init__(self, *args, **kwargs): """ Create a new task collection/namespace. `.Collection` offers a set of methods for building a collection of tasks from scratch, plus a convenient constructor wrapping said API. In either case: * The first positional argument may be a string, which (if given) is used as the collection's default name when performing namespace lookups; * A ``loaded_from`` keyword argument may be given, which sets metadata indicating the filesystem path the collection was loaded from. This is used as a guide when loading per-project :ref:`configuration files <config-hierarchy>`. * An ``auto_dash_names`` kwarg may be given, controlling whether task and collection names have underscores turned to dashes in most cases; it defaults to ``True`` but may be set to ``False`` to disable. The CLI machinery will pass in the value of the ``tasks.auto_dash_names`` config value to this kwarg. **The method approach** May initialize with no arguments and use methods (e.g. `.add_task`/`.add_collection`) to insert objects:: c = Collection() c.add_task(some_task) If an initial string argument is given, it is used as the default name for this collection, should it be inserted into another collection as a sub-namespace:: docs = Collection('docs') docs.add_task(doc_task) ns = Collection() ns.add_task(top_level_task) ns.add_collection(docs) # Valid identifiers are now 'top_level_task' and 'docs.doc_task' # (assuming the task objects were actually named the same as the # variables we're using :)) For details, see the API docs for the rest of the class. **The constructor approach** All ``*args`` given to `.Collection` (besides the abovementioned optional positional 'name' argument and ``loaded_from`` kwarg) are expected to be `.Task` or `.Collection` instances which will be passed to `.add_task`/`.add_collection` as appropriate. Module objects are also valid (as they are for `.add_collection`). For example, the below snippet results in the same two task identifiers as the one above:: ns = Collection(top_level_task, Collection('docs', doc_task)) If any ``**kwargs`` are given, the keywords are used as the initial name arguments for the respective values:: ns = Collection( top_level_task=some_other_task, docs=Collection(doc_task) ) That's exactly equivalent to:: docs = Collection(doc_task) ns = Collection() ns.add_task(some_other_task, 'top_level_task') ns.add_collection(docs, 'docs') See individual methods' API docs for details. """ # Initialize self.tasks = Lexicon() self.collections = Lexicon() self.default = None self.name = None self._configuration = {} # Specific kwargs if applicable self.loaded_from = kwargs.pop("loaded_from", None) self.auto_dash_names = kwargs.pop("auto_dash_names", None) # splat-kwargs version of default value (auto_dash_names=True) if self.auto_dash_names is None: self.auto_dash_names = True # Name if applicable args = list(args) if args and isinstance(args[0], six.string_types): self.name = self.transform(args.pop(0)) # Dispatch args/kwargs for arg in args: self._add_object(arg) # Dispatch kwargs for name, obj in six.iteritems(kwargs): self._add_object(obj, name)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Initialize", "self", ".", "tasks", "=", "Lexicon", "(", ")", "self", ".", "collections", "=", "Lexicon", "(", ")", "self", ".", "default", "=", "None", "self", ".", "name", "=", "None", "self", ".", "_configuration", "=", "{", "}", "# Specific kwargs if applicable", "self", ".", "loaded_from", "=", "kwargs", ".", "pop", "(", "\"loaded_from\"", ",", "None", ")", "self", ".", "auto_dash_names", "=", "kwargs", ".", "pop", "(", "\"auto_dash_names\"", ",", "None", ")", "# splat-kwargs version of default value (auto_dash_names=True)", "if", "self", ".", "auto_dash_names", "is", "None", ":", "self", ".", "auto_dash_names", "=", "True", "# Name if applicable", "args", "=", "list", "(", "args", ")", "if", "args", "and", "isinstance", "(", "args", "[", "0", "]", ",", "six", ".", "string_types", ")", ":", "self", ".", "name", "=", "self", ".", "transform", "(", "args", ".", "pop", "(", "0", ")", ")", "# Dispatch args/kwargs", "for", "arg", "in", "args", ":", "self", ".", "_add_object", "(", "arg", ")", "# Dispatch kwargs", "for", "name", ",", "obj", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "self", ".", "_add_object", "(", "obj", ",", "name", ")" ]
https://github.com/pyinvoke/invoke/blob/45dc9d03639dac5b6d1445831bf270e686ef88b4/invoke/collection.py#L18-L113
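A short sketch of the constructor approach from the docstring, as it would appear in a typical invoke tasks.py; the shell commands are placeholders:
from invoke import task, Collection

@task
def build(c):
    c.run("python -m build")                 # placeholder command

@task
def api(c):
    c.run("sphinx-build docs docs/_build")   # placeholder command

ns = Collection(build, Collection('docs', api))
# the namespace now exposes the identifiers 'build' and 'docs.api'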
llSourcell/AI_Artist
3038c06c2e389b9c919c881c9a169efe2fd7810e
lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
python
WorkingSet.subscribe
(self, callback)
Invoke `callback` for all distributions (including existing ones)
Invoke `callback` for all distributions (including existing ones)
[ "Invoke", "callback", "for", "all", "distributions", "(", "including", "existing", "ones", ")" ]
def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist)
[ "def", "subscribe", "(", "self", ",", "callback", ")", ":", "if", "callback", "in", "self", ".", "callbacks", ":", "return", "self", ".", "callbacks", ".", "append", "(", "callback", ")", "for", "dist", "in", "self", ":", "callback", "(", "dist", ")" ]
https://github.com/llSourcell/AI_Artist/blob/3038c06c2e389b9c919c881c9a169efe2fd7810e/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L950-L956
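subscribe implements a simple observer pattern, and as the body shows, the callback is also invoked once per distribution already in the working set. A quick sketch against the public pkg_resources API:
import pkg_resources

seen = []
pkg_resources.working_set.subscribe(seen.append)  # fires immediately for existing dists
print(len(seen))   # count of distributions currently on sys.path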
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/distutils/archive_util.py
python
make_zipfile
(base_name, base_dir, verbose=0, dry_run=0)
return zip_filename
Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file.
Create a zip file from all the files under 'base_dir'.
[ "Create", "a", "zip", "file", "from", "all", "the", "files", "under", "base_dir", "." ]
def make_zipfile(base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ try: import zipfile except ImportError: zipfile = None zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) # If zipfile module is not available, try spawning an external # 'zip' command. if zipfile is None: if verbose: zipoptions = "-r" else: zipoptions = "-rq" try: spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed". raise DistutilsExecError, \ ("unable to create zip file '%s': " "could neither import the 'zipfile' module nor " "find a standalone zip utility") % zip_filename else: log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) if not dry_run: zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) for dirpath, dirnames, filenames in os.walk(base_dir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) log.info("adding '%s'" % path) zip.close() return zip_filename
[ "def", "make_zipfile", "(", "base_name", ",", "base_dir", ",", "verbose", "=", "0", ",", "dry_run", "=", "0", ")", ":", "try", ":", "import", "zipfile", "except", "ImportError", ":", "zipfile", "=", "None", "zip_filename", "=", "base_name", "+", "\".zip\"", "mkpath", "(", "os", ".", "path", ".", "dirname", "(", "zip_filename", ")", ",", "dry_run", "=", "dry_run", ")", "# If zipfile module is not available, try spawning an external", "# 'zip' command.", "if", "zipfile", "is", "None", ":", "if", "verbose", ":", "zipoptions", "=", "\"-r\"", "else", ":", "zipoptions", "=", "\"-rq\"", "try", ":", "spawn", "(", "[", "\"zip\"", ",", "zipoptions", ",", "zip_filename", ",", "base_dir", "]", ",", "dry_run", "=", "dry_run", ")", "except", "DistutilsExecError", ":", "# XXX really should distinguish between \"couldn't find", "# external 'zip' command\" and \"zip failed\".", "raise", "DistutilsExecError", ",", "(", "\"unable to create zip file '%s': \"", "\"could neither import the 'zipfile' module nor \"", "\"find a standalone zip utility\"", ")", "%", "zip_filename", "else", ":", "log", ".", "info", "(", "\"creating '%s' and adding '%s' to it\"", ",", "zip_filename", ",", "base_dir", ")", "if", "not", "dry_run", ":", "zip", "=", "zipfile", ".", "ZipFile", "(", "zip_filename", ",", "\"w\"", ",", "compression", "=", "zipfile", ".", "ZIP_DEFLATED", ")", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "base_dir", ")", ":", "for", "name", "in", "filenames", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "zip", ".", "write", "(", "path", ",", "path", ")", "log", ".", "info", "(", "\"adding '%s'\"", "%", "path", ")", "zip", ".", "close", "(", ")", "return", "zip_filename" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/distutils/archive_util.py#L121-L173
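A hedged usage sketch (note this is the Python 2-era distutils copy, visible in the py2 raise syntax above); the directory name is illustrative and must exist:
from distutils.archive_util import make_zipfile

archive = make_zipfile('backup', 'project_dir')  # zips project_dir/** into backup.zip
print(archive)                                   # -> 'backup.zip'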
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/urllib3/packages/six.py
python
with_metaclass
(meta, *bases)
return type.__new__(metaclass, 'temporary_class', (), {})
Create a base class with a metaclass.
Create a base class with a metaclass.
[ "Create", "a", "base", "class", "with", "a", "metaclass", "." ]
def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "# This requires a bit of explanation: the basic idea is to make a dummy", "# metaclass for one level of class instantiation that replaces itself with", "# the actual metaclass.", "class", "metaclass", "(", "meta", ")", ":", "def", "__new__", "(", "cls", ",", "name", ",", "this_bases", ",", "d", ")", ":", "return", "meta", "(", "name", ",", "bases", ",", "d", ")", "return", "type", ".", "__new__", "(", "metaclass", ",", "'temporary_class'", ",", "(", ")", ",", "{", "}", ")" ]
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/urllib3/packages/six.py#L800-L809
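The dummy metaclass's __new__ fires exactly once, at class-creation time, and delegates to the real metaclass, so the same class statement works on Python 2 and 3. A small sketch showing the real metaclass actually running:
class Registry(type):
    classes = []
    def __new__(mcs, name, bases, namespace):
        cls = super(Registry, mcs).__new__(mcs, name, bases, namespace)
        Registry.classes.append(cls)   # side effect proving Registry ran
        return cls

class Plugin(with_metaclass(Registry, object)):
    pass

assert Plugin in Registry.classes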
fossasia/x-mario-center
fe67afe28d995dcf4e2498e305825a4859566172
build/lib.linux-i686-2.7/softwarecenter/ui/gtk3/app.py
python
SoftwareCenterAppGtk3.close_app
(self)
Perform tasks such as saving state when the application exits.
Perform tasks such as saving state when the application exits.
[ "Perform", "tasks", "such", "as", "saving", "state", "when", "the", "application", "exits", "." ]
def close_app(self): """ Perform tasks such as saving state when the application exits. """ # this may happen during the early initialization # when "app.run()" was called but has not finished setting up the # stuff yet, in this case it's ok to just exit if Gtk.main_level() == 0: LOG.info("closing before the regular main loop was run") sys.exit(0) # this is the case when it regularly runs if hasattr(self, "glaunchpad"): self.glaunchpad.shutdown() self.save_state() self.destroy() # this will not throw exceptions in pygi but "only" log via g_critical # to the terminal but it might in the future so we add a handler here try: Gtk.main_quit() except: LOG.exception("Gtk.main_quit failed") # exit here explicitly to ensure that no further gtk event loops or # threads run and cause havoc on exit (LP: #914393) sys.exit(0)
[ "def", "close_app", "(", "self", ")", ":", "# this may happen during the early initialization", "# when \"app.run()\" was called but has not finished setting up the", "# stuff yet, in this case it's ok to just exit", "if", "Gtk", ".", "main_level", "(", ")", "==", "0", ":", "LOG", ".", "info", "(", "\"closing before the regular main loop was run\"", ")", "sys", ".", "exit", "(", "0", ")", "# this is the case when it regularly runs", "if", "hasattr", "(", "self", ",", "\"glaunchpad\"", ")", ":", "self", ".", "glaunchpad", ".", "shutdown", "(", ")", "self", ".", "save_state", "(", ")", "self", ".", "destroy", "(", ")", "# this will not throw exceptions in pygi but \"only\" log via g_critical", "# to the terminal but it might in the future so we add a handler here", "try", ":", "Gtk", ".", "main_quit", "(", ")", "except", ":", "LOG", ".", "exception", "(", "\"Gtk.main_quit failed\"", ")", "# exit here explicitly to ensure that no further gtk event loops or", "# threads run and cause havoc on exit (LP: #914393)", "sys", ".", "exit", "(", "0", ")" ]
https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/build/lib.linux-i686-2.7/softwarecenter/ui/gtk3/app.py#L617-L641
ianmiell/shutit
ef724e1ed4dcc544e594200e0b6cdfa53d04a95f
emailer.py
python
Emailer.__gzip
(filename)
return zipname
Compress a file returning the new filename (.gz)
Compress a file returning the new filename (.gz)
[ "Compress", "a", "file", "returning", "the", "new", "filename", "(", ".", "gz", ")" ]
def __gzip(filename): """ Compress a file returning the new filename (.gz) """ zipname = filename + '.gz' file_pointer = open(filename,'rb') zip_pointer = gzip.open(zipname,'wb') zip_pointer.writelines(file_pointer) file_pointer.close() zip_pointer.close() return zipname
[ "def", "__gzip", "(", "filename", ")", ":", "zipname", "=", "filename", "+", "'.gz'", "file_pointer", "=", "open", "(", "filename", ",", "'rb'", ")", "zip_pointer", "=", "gzip", ".", "open", "(", "zipname", ",", "'wb'", ")", "zip_pointer", ".", "writelines", "(", "file_pointer", ")", "file_pointer", ".", "close", "(", ")", "zip_pointer", ".", "close", "(", ")", "return", "zipname" ]
https://github.com/ianmiell/shutit/blob/ef724e1ed4dcc544e594200e0b6cdfa53d04a95f/emailer.py#L128-L137
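An equivalent standalone sketch using context managers and shutil.copyfileobj, which copies in fixed-size chunks rather than materializing line iterables; the function name is illustrative:
import gzip
import shutil

def gzip_file(filename):
    zipname = filename + '.gz'
    with open(filename, 'rb') as src, gzip.open(zipname, 'wb') as dst:
        shutil.copyfileobj(src, dst)   # streamed copy, files closed automatically
    return zipname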
007gzs/dingtalk-sdk
7979da2e259fdbc571728cae2425a04dbc65850a
dingtalk/client/api/taobao.py
python
TbZhiNengSheBei.alibaba_retail_device_trade_settle
( self, session_key, items, device_sn='', device_type='' )
return self._top_request( "alibaba.retail.device.trade.settle", { "session_key": session_key, "items": items, "device_sn": device_sn, "device_type": device_type } )
č®¾å¤‡ē”®č®¤å¼€å§‹ē»“ē®—ęŽ„å£ č®¾å¤‡äŗ¤ę˜“å·²ē»ē”®č®¤åÆä»„å¼€å§‹ē»“ē®—ęŽ„å£ ę–‡ę”£åœ°å€ļ¼šhttps://open-doc.dingtalk.com/docs/api.htm?apiId=31876 :param session_key: ä¼ščÆid :param items: ē³»ē»Ÿč‡ŖåŠØē”Ÿęˆ :param device_sn: 设备sn :param device_type: 设备sn
č®¾å¤‡ē”®č®¤å¼€å§‹ē»“ē®—ęŽ„å£ č®¾å¤‡äŗ¤ę˜“å·²ē»ē”®č®¤åÆä»„å¼€å§‹ē»“ē®—ęŽ„å£ ę–‡ę”£åœ°å€ļ¼šhttps://open-doc.dingtalk.com/docs/api.htm?apiId=31876
[ "č®¾å¤‡ē”®č®¤å¼€å§‹ē»“ē®—ęŽ„å£", "č®¾å¤‡äŗ¤ę˜“å·²ē»ē”®č®¤åÆä»„å¼€å§‹ē»“ē®—ęŽ„å£", "ę–‡ę”£åœ°å€ļ¼šhttps", ":", "//", "open", "-", "doc", ".", "dingtalk", ".", "com", "/", "docs", "/", "api", ".", "htm?apiId", "=", "31876" ]
def alibaba_retail_device_trade_settle( self, session_key, items, device_sn='', device_type='' ): """ č®¾å¤‡ē”®č®¤å¼€å§‹ē»“ē®—ęŽ„å£ č®¾å¤‡äŗ¤ę˜“å·²ē»ē”®č®¤åÆä»„å¼€å§‹ē»“ē®—ęŽ„å£ ę–‡ę”£åœ°å€ļ¼šhttps://open-doc.dingtalk.com/docs/api.htm?apiId=31876 :param session_key: ä¼ščÆid :param items: ē³»ē»Ÿč‡ŖåŠØē”Ÿęˆ :param device_sn: 设备sn :param device_type: 设备sn """ return self._top_request( "alibaba.retail.device.trade.settle", { "session_key": session_key, "items": items, "device_sn": device_sn, "device_type": device_type } )
[ "def", "alibaba_retail_device_trade_settle", "(", "self", ",", "session_key", ",", "items", ",", "device_sn", "=", "''", ",", "device_type", "=", "''", ")", ":", "return", "self", ".", "_top_request", "(", "\"alibaba.retail.device.trade.settle\"", ",", "{", "\"session_key\"", ":", "session_key", ",", "\"items\"", ":", "items", ",", "\"device_sn\"", ":", "device_sn", ",", "\"device_type\"", ":", "device_type", "}", ")" ]
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L54692-L54717
elfi-dev/elfi
07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c
elfi/methods/bo/gpy_regression.py
python
GPyRegression.predict
(self, x, noiseless=False)
Return the GP model mean and variance at x. Parameters ---------- x : np.array numpy compatible (n, input_dim) array of points to evaluate if len(x.shape) == 1 will be cast to 2D with x[None, :] noiseless : bool whether to include the noise variance or not to the returned variance Returns ------- tuple GP (mean, var) at x where mean : np.array with shape (x.shape[0], 1) var : np.array with shape (x.shape[0], 1)
Return the GP model mean and variance at x.
[ "Return", "the", "GP", "model", "mean", "and", "variance", "at", "x", "." ]
def predict(self, x, noiseless=False): """Return the GP model mean and variance at x. Parameters ---------- x : np.array numpy compatible (n, input_dim) array of points to evaluate if len(x.shape) == 1 will be cast to 2D with x[None, :] noiseless : bool whether to include the noise variance or not to the returned variance Returns ------- tuple GP (mean, var) at x where mean : np.array with shape (x.shape[0], 1) var : np.array with shape (x.shape[0], 1) """ # Ensure it's 2d for GPy x = np.asanyarray(x).reshape((-1, self.input_dim)) if self._gp is None: # TODO: return from GP mean function if given return np.zeros((x.shape[0], 1)), \ np.ones((x.shape[0], 1)) # direct (=faster) implementation for RBF kernel if self.is_sampling and self._kernel_is_default: if not self._rbf_is_cached: self._cache_RBF_kernel() r2 = np.sum(x**2., 1)[:, None] + self._rbf_x2sum - 2. * x.dot(self._gp.X.T) kx = self._rbf_var * np.exp(r2 * self._rbf_factor) + self._rbf_bias mu = kx.dot(self._rbf_woodbury) var = self._rbf_var + self._rbf_bias var -= kx.dot(self._rbf_woodbury_inv.dot(kx.T)) var += self._rbf_noisevar # likelihood return mu, var else: self._rbf_is_cached = False # in case one resumes fitting the GP after sampling if noiseless: return self._gp.predict_noiseless(x) else: return self._gp.predict(x)
[ "def", "predict", "(", "self", ",", "x", ",", "noiseless", "=", "False", ")", ":", "# Ensure it's 2d for GPy", "x", "=", "np", ".", "asanyarray", "(", "x", ")", ".", "reshape", "(", "(", "-", "1", ",", "self", ".", "input_dim", ")", ")", "if", "self", ".", "_gp", "is", "None", ":", "# TODO: return from GP mean function if given", "return", "np", ".", "zeros", "(", "(", "x", ".", "shape", "[", "0", "]", ",", "1", ")", ")", ",", "np", ".", "ones", "(", "(", "x", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "# direct (=faster) implementation for RBF kernel", "if", "self", ".", "is_sampling", "and", "self", ".", "_kernel_is_default", ":", "if", "not", "self", ".", "_rbf_is_cached", ":", "self", ".", "_cache_RBF_kernel", "(", ")", "r2", "=", "np", ".", "sum", "(", "x", "**", "2.", ",", "1", ")", "[", ":", ",", "None", "]", "+", "self", ".", "_rbf_x2sum", "-", "2.", "*", "x", ".", "dot", "(", "self", ".", "_gp", ".", "X", ".", "T", ")", "kx", "=", "self", ".", "_rbf_var", "*", "np", ".", "exp", "(", "r2", "*", "self", ".", "_rbf_factor", ")", "+", "self", ".", "_rbf_bias", "mu", "=", "kx", ".", "dot", "(", "self", ".", "_rbf_woodbury", ")", "var", "=", "self", ".", "_rbf_var", "+", "self", ".", "_rbf_bias", "var", "-=", "kx", ".", "dot", "(", "self", ".", "_rbf_woodbury_inv", ".", "dot", "(", "kx", ".", "T", ")", ")", "var", "+=", "self", ".", "_rbf_noisevar", "# likelihood", "return", "mu", ",", "var", "else", ":", "self", ".", "_rbf_is_cached", "=", "False", "# in case one resumes fitting the GP after sampling", "if", "noiseless", ":", "return", "self", ".", "_gp", ".", "predict_noiseless", "(", "x", ")", "else", ":", "return", "self", ".", "_gp", ".", "predict", "(", "x", ")" ]
https://github.com/elfi-dev/elfi/blob/07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c/elfi/methods/bo/gpy_regression.py#L97-L146
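The fast RBF branch computes all pairwise squared distances via the expansion ||x - y||^2 = x.x + y.y - 2 x.y, which needs only one matrix product; a numpy sketch of just that identity:
import numpy as np

X = np.random.rand(5, 3)   # query points
Y = np.random.rand(7, 3)   # training inputs
r2 = np.sum(X**2, 1)[:, None] + np.sum(Y**2, 1)[None, :] - 2.0 * X.dot(Y.T)
brute = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(-1)
assert np.allclose(r2, brute)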
StevenBlack/hosts
c57a61bb09a85e775e12c5566e82c3c952b2aa9d
updateHostsFile.py
python
list_dir_no_hidden
(path)
return glob(os.path.join(path, "*"))
List all files in a directory, except for hidden files. Parameters ---------- path : str The path of the directory whose files we wish to list.
List all files in a directory, except for hidden files.
[ "List", "all", "files", "in", "a", "directory", "except", "for", "hidden", "files", "." ]
def list_dir_no_hidden(path): """ List all files in a directory, except for hidden files. Parameters ---------- path : str The path of the directory whose files we wish to list. """ return glob(os.path.join(path, "*"))
[ "def", "list_dir_no_hidden", "(", "path", ")", ":", "return", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"*\"", ")", ")" ]
https://github.com/StevenBlack/hosts/blob/c57a61bb09a85e775e12c5566e82c3c952b2aa9d/updateHostsFile.py#L1561-L1571
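This works because glob's * wildcard never matches a leading dot, so dotfiles drop out without any explicit filtering; a quick demonstration:
import os
from glob import glob

os.makedirs('demo', exist_ok=True)
open(os.path.join('demo', 'visible.txt'), 'w').close()
open(os.path.join('demo', '.hidden'), 'w').close()
print(glob(os.path.join('demo', '*')))   # only demo/visible.txt is listed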
mcfletch/pyopengl
02d11dad9ff18e50db10e975c4756e17bf198464
OpenGL/GL/exceptional.py
python
glRasterPos
( *args )
return function( *args )
Choose glRasterPosX based on number of args
Choose glRasterPosX based on number of args
[ "Choose", "glRasterPosX", "based", "on", "number", "of", "args" ]
def glRasterPos( *args ): """Choose glRasterPosX based on number of args""" if len(args) == 1: # v form... args = args[0] function = glRasterPosDispatch[ len(args) ] return function( *args )
[ "def", "glRasterPos", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "# v form...", "args", "=", "args", "[", "0", "]", "function", "=", "glRasterPosDispatch", "[", "len", "(", "args", ")", "]", "return", "function", "(", "*", "args", ")" ]
https://github.com/mcfletch/pyopengl/blob/02d11dad9ff18e50db10e975c4756e17bf198464/OpenGL/GL/exceptional.py#L118-L124
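The arity-dispatch pattern above selects an entry point by argument count (glRasterPosDispatch presumably maps lengths 2-4 to the glRasterPos2/3/4-style functions); a self-contained sketch of the same idea with stand-in functions:
def pos2(x, y): return (x, y)
def pos3(x, y, z): return (x, y, z)

dispatch = {2: pos2, 3: pos3}

def raster_pos(*args):
    if len(args) == 1:            # vector form, e.g. raster_pos((1.0, 2.0))
        args = args[0]
    return dispatch[len(args)](*args)

assert raster_pos(1.0, 2.0) == raster_pos((1.0, 2.0))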
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
openedx/core/lib/blockstore_api/methods.py
python
api_request
(method, url, **kwargs)
return response.json()
Helper method for making a request to the Blockstore REST API
Helper method for making a request to the Blockstore REST API
[ "Helper", "method", "for", "making", "a", "request", "to", "the", "Blockstore", "REST", "API" ]
def api_request(method, url, **kwargs): """ Helper method for making a request to the Blockstore REST API """ if not settings.BLOCKSTORE_API_AUTH_TOKEN: raise ImproperlyConfigured("Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.") kwargs.setdefault('headers', {})['Authorization'] = f"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}" response = requests.request(method, url, **kwargs) if response.status_code == 404: raise NotFound response.raise_for_status() if response.status_code == 204: return None # No content return response.json()
[ "def", "api_request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "not", "settings", ".", "BLOCKSTORE_API_AUTH_TOKEN", ":", "raise", "ImproperlyConfigured", "(", "\"Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.\"", ")", "kwargs", ".", "setdefault", "(", "'headers'", ",", "{", "}", ")", "[", "'Authorization'", "]", "=", "f\"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}\"", "response", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")", "if", "response", ".", "status_code", "==", "404", ":", "raise", "NotFound", "response", ".", "raise_for_status", "(", ")", "if", "response", ".", "status_code", "==", "204", ":", "return", "None", "# No content", "return", "response", ".", "json", "(", ")" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/lib/blockstore_api/methods.py#L39-L52
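A hedged calling sketch; the host and bundle UUID are placeholders, and the behavior follows the branches above (parsed JSON on success, None on 204, NotFound raised on 404):
base = 'https://blockstore.example.com/api/v1'           # placeholder host
bundle = api_request('get', base + '/bundles/<uuid>/')   # parsed JSON body
api_request('delete', base + '/bundles/<uuid>/')         # returns None on 204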
getnikola/nikola
2da876e9322e42a93f8295f950e336465c6a4ee5
nikola/plugins/compile/rest/thumbnail.py
python
Thumbnail.run
(self)
return [node]
Run the thumbnail directive.
Run the thumbnail directive.
[ "Run", "the", "thumbnail", "directive", "." ]
def run(self): """Run the thumbnail directive.""" uri = directives.uri(self.arguments[0]) if uri.endswith('.svg'): # the ? at the end makes docutil output an <img> instead of an object for the svg, which lightboxes may require self.arguments[0] = '.thumbnail'.join(os.path.splitext(uri)) + '?' else: self.arguments[0] = '.thumbnail'.join(os.path.splitext(uri)) self.options['target'] = uri if self.content: (node,) = Figure.run(self) else: (node,) = Image.run(self) return [node]
[ "def", "run", "(", "self", ")", ":", "uri", "=", "directives", ".", "uri", "(", "self", ".", "arguments", "[", "0", "]", ")", "if", "uri", ".", "endswith", "(", "'.svg'", ")", ":", "# the ? at the end makes docutil output an <img> instead of an object for the svg, which lightboxes may require", "self", ".", "arguments", "[", "0", "]", "=", "'.thumbnail'", ".", "join", "(", "os", ".", "path", ".", "splitext", "(", "uri", ")", ")", "+", "'?'", "else", ":", "self", ".", "arguments", "[", "0", "]", "=", "'.thumbnail'", ".", "join", "(", "os", ".", "path", ".", "splitext", "(", "uri", ")", ")", "self", ".", "options", "[", "'target'", "]", "=", "uri", "if", "self", ".", "content", ":", "(", "node", ",", ")", "=", "Figure", ".", "run", "(", "self", ")", "else", ":", "(", "node", ",", ")", "=", "Image", ".", "run", "(", "self", ")", "return", "[", "node", "]" ]
https://github.com/getnikola/nikola/blob/2da876e9322e42a93f8295f950e336465c6a4ee5/nikola/plugins/compile/rest/thumbnail.py#L68-L81
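The key filename rewrite is the '.thumbnail'.join(os.path.splitext(uri)) idiom, which splices a suffix in front of the extension; a tiny demonstration:
import os

print('.thumbnail'.join(os.path.splitext('images/photo.png')))   # images/photo.thumbnail.png
print('.thumbnail'.join(os.path.splitext('chart.svg')) + '?')    # chart.thumbnail.svg? (forces an <img>)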
joxeankoret/nightmare
11b22bb7c346611de90f479ee781c9228af453ea
lib/interfaces/vdb/extensions/gdbstub.py
python
ethread
(db, line)
Display information about the currently stopped ethread. Usage: ethread #FIXME support listing them #FIXME support ethread interp arbitrary address
Display information about the currently stopped ethread.
[ "Display", "information", "about", "the", "currently", "stopped", "ethread", "." ]
def ethread(db, line): ''' Display information about the currently stopped ethread. Usage: ethread #FIXME support listing them #FIXME support ethread interp arbitrary address ''' t = db.getTrace() t.requireNotRunning() fsbase = t._getVmwareReg('fs') kpcr = t.getStruct('nt.KPCR', fsbase) ethraddr = kpcr.PrcbData.CurrentThread ethr = t.getStruct('nt.ETHREAD', ethraddr) db.vprint(ethr.tree(va=ethraddr))
[ "def", "ethread", "(", "db", ",", "line", ")", ":", "t", "=", "db", ".", "getTrace", "(", ")", "t", ".", "requireNotRunning", "(", ")", "fsbase", "=", "t", ".", "_getVmwareReg", "(", "'fs'", ")", "kpcr", "=", "t", ".", "getStruct", "(", "'nt.KPCR'", ",", "fsbase", ")", "ethraddr", "=", "kpcr", ".", "PrcbData", ".", "CurrentThread", "ethr", "=", "t", ".", "getStruct", "(", "'nt.ETHREAD'", ",", "ethraddr", ")", "db", ".", "vprint", "(", "ethr", ".", "tree", "(", "va", "=", "ethraddr", ")", ")" ]
https://github.com/joxeankoret/nightmare/blob/11b22bb7c346611de90f479ee781c9228af453ea/lib/interfaces/vdb/extensions/gdbstub.py#L5-L19
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/builds/models.py
python
BuildSpec.get_build
(self)
[]
def get_build(self): if self.latest: return CommCareBuild.get_build(self.version, latest=True) else: return CommCareBuild.get_build(self.version, self.build_number)
[ "def", "get_build", "(", "self", ")", ":", "if", "self", ".", "latest", ":", "return", "CommCareBuild", ".", "get_build", "(", "self", ".", "version", ",", "latest", "=", "True", ")", "else", ":", "return", "CommCareBuild", ".", "get_build", "(", "self", ".", "version", ",", "self", ".", "build_number", ")" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/builds/models.py#L185-L189
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Tools/bgen/bgen/bgenOutput.py
python
SetLevel
(level)
Set the current indentation level. This does no type or range checking -- use at own risk.
Set the current indentation level.
[ "Set", "the", "current", "indentation", "level", "." ]
def SetLevel(level): """Set the current indentation level. This does no type or range checking -- use at own risk. """ global _Level _Level = level
[ "def", "SetLevel", "(", "level", ")", ":", "global", "_Level", "_Level", "=", "level" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Tools/bgen/bgen/bgenOutput.py#L49-L55
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/boto-2.46.1/boto/ec2/connection.py
python
EC2Connection.delete_volume
(self, volume_id, dry_run=False)
return self.get_status('DeleteVolume', params, verb='POST')
Delete an EBS volume. :type volume_id: str :param volume_id: The ID of the volume to be deleted. :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. :rtype: bool :return: True if successful
Delete an EBS volume.
[ "Delete", "an", "EBS", "volume", "." ]
def delete_volume(self, volume_id, dry_run=False): """ Delete an EBS volume. :type volume_id: str :param volume_id: The ID of the volume to be deleted. :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. :rtype: bool :return: True if successful """ params = {'VolumeId': volume_id} if dry_run: params['DryRun'] = 'true' return self.get_status('DeleteVolume', params, verb='POST')
[ "def", "delete_volume", "(", "self", ",", "volume_id", ",", "dry_run", "=", "False", ")", ":", "params", "=", "{", "'VolumeId'", ":", "volume_id", "}", "if", "dry_run", ":", "params", "[", "'DryRun'", "]", "=", "'true'", "return", "self", ".", "get_status", "(", "'DeleteVolume'", ",", "params", ",", "verb", "=", "'POST'", ")" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/boto-2.46.1/boto/ec2/connection.py#L2333-L2349
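A usage sketch against the boto 2 EC2 API shown above; the region and volume id are placeholders, and valid AWS credentials are assumed:
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
if conn.delete_volume('vol-0123456789abcdef0'):
    print('volume deleted')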
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/vendor/dateutil/rrule.py
python
_rrulestr._parse_rfc
(self, s, dtstart=None, cache=False, unfold=False, forceset=False, compatible=False, ignoretz=False, tzids=None, tzinfos=None)
[]
def _parse_rfc(self, s, dtstart=None, cache=False, unfold=False, forceset=False, compatible=False, ignoretz=False, tzids=None, tzinfos=None): global parser if compatible: forceset = True unfold = True TZID_NAMES = dict(map( lambda x: (x.upper(), x), re.findall('TZID=(?P<name>[^:]+):', s) )) s = s.upper() if not s.strip(): raise ValueError("empty string") if unfold: lines = s.splitlines() i = 0 while i < len(lines): line = lines[i].rstrip() if not line: del lines[i] elif i > 0 and line[0] == " ": lines[i-1] += line[1:] del lines[i] else: i += 1 else: lines = s.split() if (not forceset and len(lines) == 1 and (s.find(':') == -1 or s.startswith('RRULE:'))): return self._parse_rfc_rrule(lines[0], cache=cache, dtstart=dtstart, ignoretz=ignoretz, tzinfos=tzinfos) else: rrulevals = [] rdatevals = [] exrulevals = [] exdatevals = [] for line in lines: if not line: continue if line.find(':') == -1: name = "RRULE" value = line else: name, value = line.split(':', 1) parms = name.split(';') if not parms: raise ValueError("empty property name") name = parms[0] parms = parms[1:] if name == "RRULE": for parm in parms: raise ValueError("unsupported RRULE parm: "+parm) rrulevals.append(value) elif name == "RDATE": for parm in parms: if parm != "VALUE=DATE-TIME": raise ValueError("unsupported RDATE parm: "+parm) rdatevals.append(value) elif name == "EXRULE": for parm in parms: raise ValueError("unsupported EXRULE parm: "+parm) exrulevals.append(value) elif name == "EXDATE": exdatevals.extend( self._parse_date_value(value, parms, TZID_NAMES, ignoretz, tzids, tzinfos) ) elif name == "DTSTART": dtvals = self._parse_date_value(value, parms, TZID_NAMES, ignoretz, tzids, tzinfos) if len(dtvals) != 1: raise ValueError("Multiple DTSTART values specified:" + value) dtstart = dtvals[0] else: raise ValueError("unsupported property: "+name) if (forceset or len(rrulevals) > 1 or rdatevals or exrulevals or exdatevals): if not parser and (rdatevals or exdatevals): from pipenv.vendor.dateutil import parser rset = rruleset(cache=cache) for value in rrulevals: rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, ignoretz=ignoretz, tzinfos=tzinfos)) for value in rdatevals: for datestr in value.split(','): rset.rdate(parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)) for value in exrulevals: rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, ignoretz=ignoretz, tzinfos=tzinfos)) for value in exdatevals: rset.exdate(value) if compatible and dtstart: rset.rdate(dtstart) return rset else: return self._parse_rfc_rrule(rrulevals[0], dtstart=dtstart, cache=cache, ignoretz=ignoretz, tzinfos=tzinfos)
[ "def", "_parse_rfc", "(", "self", ",", "s", ",", "dtstart", "=", "None", ",", "cache", "=", "False", ",", "unfold", "=", "False", ",", "forceset", "=", "False", ",", "compatible", "=", "False", ",", "ignoretz", "=", "False", ",", "tzids", "=", "None", ",", "tzinfos", "=", "None", ")", ":", "global", "parser", "if", "compatible", ":", "forceset", "=", "True", "unfold", "=", "True", "TZID_NAMES", "=", "dict", "(", "map", "(", "lambda", "x", ":", "(", "x", ".", "upper", "(", ")", ",", "x", ")", ",", "re", ".", "findall", "(", "'TZID=(?P<name>[^:]+):'", ",", "s", ")", ")", ")", "s", "=", "s", ".", "upper", "(", ")", "if", "not", "s", ".", "strip", "(", ")", ":", "raise", "ValueError", "(", "\"empty string\"", ")", "if", "unfold", ":", "lines", "=", "s", ".", "splitlines", "(", ")", "i", "=", "0", "while", "i", "<", "len", "(", "lines", ")", ":", "line", "=", "lines", "[", "i", "]", ".", "rstrip", "(", ")", "if", "not", "line", ":", "del", "lines", "[", "i", "]", "elif", "i", ">", "0", "and", "line", "[", "0", "]", "==", "\" \"", ":", "lines", "[", "i", "-", "1", "]", "+=", "line", "[", "1", ":", "]", "del", "lines", "[", "i", "]", "else", ":", "i", "+=", "1", "else", ":", "lines", "=", "s", ".", "split", "(", ")", "if", "(", "not", "forceset", "and", "len", "(", "lines", ")", "==", "1", "and", "(", "s", ".", "find", "(", "':'", ")", "==", "-", "1", "or", "s", ".", "startswith", "(", "'RRULE:'", ")", ")", ")", ":", "return", "self", ".", "_parse_rfc_rrule", "(", "lines", "[", "0", "]", ",", "cache", "=", "cache", ",", "dtstart", "=", "dtstart", ",", "ignoretz", "=", "ignoretz", ",", "tzinfos", "=", "tzinfos", ")", "else", ":", "rrulevals", "=", "[", "]", "rdatevals", "=", "[", "]", "exrulevals", "=", "[", "]", "exdatevals", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "not", "line", ":", "continue", "if", "line", ".", "find", "(", "':'", ")", "==", "-", "1", ":", "name", "=", "\"RRULE\"", "value", "=", "line", "else", ":", "name", ",", "value", "=", "line", ".", "split", "(", "':'", ",", "1", ")", "parms", "=", "name", ".", "split", "(", "';'", ")", "if", "not", "parms", ":", "raise", "ValueError", "(", "\"empty property name\"", ")", "name", "=", "parms", "[", "0", "]", "parms", "=", "parms", "[", "1", ":", "]", "if", "name", "==", "\"RRULE\"", ":", "for", "parm", "in", "parms", ":", "raise", "ValueError", "(", "\"unsupported RRULE parm: \"", "+", "parm", ")", "rrulevals", ".", "append", "(", "value", ")", "elif", "name", "==", "\"RDATE\"", ":", "for", "parm", "in", "parms", ":", "if", "parm", "!=", "\"VALUE=DATE-TIME\"", ":", "raise", "ValueError", "(", "\"unsupported RDATE parm: \"", "+", "parm", ")", "rdatevals", ".", "append", "(", "value", ")", "elif", "name", "==", "\"EXRULE\"", ":", "for", "parm", "in", "parms", ":", "raise", "ValueError", "(", "\"unsupported EXRULE parm: \"", "+", "parm", ")", "exrulevals", ".", "append", "(", "value", ")", "elif", "name", "==", "\"EXDATE\"", ":", "exdatevals", ".", "extend", "(", "self", ".", "_parse_date_value", "(", "value", ",", "parms", ",", "TZID_NAMES", ",", "ignoretz", ",", "tzids", ",", "tzinfos", ")", ")", "elif", "name", "==", "\"DTSTART\"", ":", "dtvals", "=", "self", ".", "_parse_date_value", "(", "value", ",", "parms", ",", "TZID_NAMES", ",", "ignoretz", ",", "tzids", ",", "tzinfos", ")", "if", "len", "(", "dtvals", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Multiple DTSTART values specified:\"", "+", "value", ")", "dtstart", "=", "dtvals", "[", "0", "]", "else", ":", "raise", "ValueError", "(", "\"unsupported 
property: \"", "+", "name", ")", "if", "(", "forceset", "or", "len", "(", "rrulevals", ")", ">", "1", "or", "rdatevals", "or", "exrulevals", "or", "exdatevals", ")", ":", "if", "not", "parser", "and", "(", "rdatevals", "or", "exdatevals", ")", ":", "from", "pipenv", ".", "vendor", ".", "dateutil", "import", "parser", "rset", "=", "rruleset", "(", "cache", "=", "cache", ")", "for", "value", "in", "rrulevals", ":", "rset", ".", "rrule", "(", "self", ".", "_parse_rfc_rrule", "(", "value", ",", "dtstart", "=", "dtstart", ",", "ignoretz", "=", "ignoretz", ",", "tzinfos", "=", "tzinfos", ")", ")", "for", "value", "in", "rdatevals", ":", "for", "datestr", "in", "value", ".", "split", "(", "','", ")", ":", "rset", ".", "rdate", "(", "parser", ".", "parse", "(", "datestr", ",", "ignoretz", "=", "ignoretz", ",", "tzinfos", "=", "tzinfos", ")", ")", "for", "value", "in", "exrulevals", ":", "rset", ".", "exrule", "(", "self", ".", "_parse_rfc_rrule", "(", "value", ",", "dtstart", "=", "dtstart", ",", "ignoretz", "=", "ignoretz", ",", "tzinfos", "=", "tzinfos", ")", ")", "for", "value", "in", "exdatevals", ":", "rset", ".", "exdate", "(", "value", ")", "if", "compatible", "and", "dtstart", ":", "rset", ".", "rdate", "(", "dtstart", ")", "return", "rset", "else", ":", "return", "self", ".", "_parse_rfc_rrule", "(", "rrulevals", "[", "0", "]", ",", "dtstart", "=", "dtstart", ",", "cache", "=", "cache", ",", "ignoretz", "=", "ignoretz", ",", "tzinfos", "=", "tzinfos", ")" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/dateutil/rrule.py#L1615-L1729
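A sketch exercising this parser through dateutil's public rrulestr entry point (a module-level instance of _rrulestr; shown with the standalone dateutil package rather than pipenv's vendored copy). The two-line input takes the property-parsing branch, handling DTSTART and then a single RRULE:
from dateutil.rrule import rrulestr

rule = rrulestr("DTSTART:20240101T090000\nRRULE:FREQ=DAILY;COUNT=3")
print(list(rule))   # three consecutive daily datetimes starting 2024-01-01 09:00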
deepchem/deepchem
054eb4b2b082e3df8e1a8e77f36a52137ae6e375
deepchem/models/layers.py
python
GraphEmbedPoolLayer.call
(self, inputs)
return result, result_A
Parameters ---------- num_filters: int Number of filters to have in the output in_layers: list of Layers or tensors [V, A, mask] V are the vertex features and must be of shape (batch, vertex, channel) A are the adjacency matrices for each graph Shape (batch, from_vertex, adj_matrix, to_vertex) mask is optional, to be used when not every graph has the same number of vertices Returns ------- Returns a `tf.tensor` with a graph convolution applied The shape will be `(batch, vertex, self.num_filters)`.
Parameters ---------- num_filters: int Number of filters to have in the output in_layers: list of Layers or tensors [V, A, mask] V are the vertex features and must be of shape (batch, vertex, channel)
[ "Parameters", "----------", "num_filters", ":", "int", "Number", "of", "filters", "to", "have", "in", "the", "output", "in_layers", ":", "list", "of", "Layers", "or", "tensors", "[", "V", "A", "mask", "]", "V", "are", "the", "vertex", "features", "and", "must", "be", "of", "shape", "(", "batch", "vertex", "channel", ")" ]
def call(self, inputs): """ Parameters ---------- num_filters: int Number of filters to have in the output in_layers: list of Layers or tensors [V, A, mask] V are the vertex features and must be of shape (batch, vertex, channel) A are the adjacency matrices for each graph Shape (batch, from_vertex, adj_matrix, to_vertex) mask is optional, to be used when not every graph has the same number of vertices Returns ------- Returns a `tf.tensor` with a graph convolution applied The shape will be `(batch, vertex, self.num_filters)`. """ if len(inputs) == 3: V, A, mask = inputs else: V, A = inputs mask = None factors = self.embedding_factors(V) if mask is not None: factors = tf.multiply(factors, mask) factors = self.softmax_factors(factors) result = tf.matmul(factors, V, transpose_a=True) result_A = tf.reshape(A, (tf.shape(A)[0], -1, tf.shape(A)[-1])) result_A = tf.matmul(result_A, factors) result_A = tf.reshape(result_A, (tf.shape(A)[0], tf.shape(A)[-1], -1)) result_A = tf.matmul(factors, result_A, transpose_a=True) result_A = tf.reshape(result_A, (tf.shape(A)[0], self.num_vertices, A.get_shape()[2], self.num_vertices)) return result, result_A
[ "def", "call", "(", "self", ",", "inputs", ")", ":", "if", "len", "(", "inputs", ")", "==", "3", ":", "V", ",", "A", ",", "mask", "=", "inputs", "else", ":", "V", ",", "A", "=", "inputs", "mask", "=", "None", "factors", "=", "self", ".", "embedding_factors", "(", "V", ")", "if", "mask", "is", "not", "None", ":", "factors", "=", "tf", ".", "multiply", "(", "factors", ",", "mask", ")", "factors", "=", "self", ".", "softmax_factors", "(", "factors", ")", "result", "=", "tf", ".", "matmul", "(", "factors", ",", "V", ",", "transpose_a", "=", "True", ")", "result_A", "=", "tf", ".", "reshape", "(", "A", ",", "(", "tf", ".", "shape", "(", "A", ")", "[", "0", "]", ",", "-", "1", ",", "tf", ".", "shape", "(", "A", ")", "[", "-", "1", "]", ")", ")", "result_A", "=", "tf", ".", "matmul", "(", "result_A", ",", "factors", ")", "result_A", "=", "tf", ".", "reshape", "(", "result_A", ",", "(", "tf", ".", "shape", "(", "A", ")", "[", "0", "]", ",", "tf", ".", "shape", "(", "A", ")", "[", "-", "1", "]", ",", "-", "1", ")", ")", "result_A", "=", "tf", ".", "matmul", "(", "factors", ",", "result_A", ",", "transpose_a", "=", "True", ")", "result_A", "=", "tf", ".", "reshape", "(", "result_A", ",", "(", "tf", ".", "shape", "(", "A", ")", "[", "0", "]", ",", "self", ".", "num_vertices", ",", "A", ".", "get_shape", "(", ")", "[", "2", "]", ",", "self", ".", "num_vertices", ")", ")", "return", "result", ",", "result_A" ]
https://github.com/deepchem/deepchem/blob/054eb4b2b082e3df8e1a8e77f36a52137ae6e375/deepchem/models/layers.py#L2396-L2436
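The pooling step is the transpose_a matmul: contracting soft assignment factors against vertex features collapses the vertex axis down to num_filters. A tensorflow sketch of just that contraction, with illustrative shapes:
import tensorflow as tf

factors = tf.random.normal([2, 6, 4])   # (batch, vertex, num_filters) soft assignments
V = tf.random.normal([2, 6, 8])         # (batch, vertex, channel) vertex features
pooled = tf.matmul(factors, V, transpose_a=True)
print(pooled.shape)                     # (2, 4, 8) -> vertex axis pooled away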
ReactionMechanismGenerator/RMG-Py
2b7baf51febf27157def58fb3f6cee03fb6a684c
rmgpy/data/solvation.py
python
SoluteLibrary.save_entry
(self, f, entry)
return save_entry(f, entry)
Write the given `entry` in the solute database to the file object `f`.
Write the given `entry` in the solute database to the file object `f`.
[ "Write", "the", "given", "entry", "in", "the", "solute", "database", "to", "the", "file", "object", "f", "." ]
def save_entry(self, f, entry): """ Write the given `entry` in the solute database to the file object `f`. """ return save_entry(f, entry)
[ "def", "save_entry", "(", "self", ",", "f", ",", "entry", ")", ":", "return", "save_entry", "(", "f", ",", "entry", ")" ]
https://github.com/ReactionMechanismGenerator/RMG-Py/blob/2b7baf51febf27157def58fb3f6cee03fb6a684c/rmgpy/data/solvation.py#L694-L698
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-build/python-libs/xmpppy/xmpp/protocol.py
python
Presence.getShow
(self)
return self.getTagData('show')
Returns the show value of the message.
Returns the show value of the message.
[ "Returns", "the", "show", "value", "of", "the", "message", "." ]
def getShow(self): """ Returns the show value of the message. """ return self.getTagData('show')
[ "def", "getShow", "(", "self", ")", ":", "return", "self", ".", "getTagData", "(", "'show'", ")" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/xmpppy/xmpp/protocol.py#L462-L464
zulip/zulip
19f891968de50d43920af63526c823bdd233cdee
tools/lib/pretty_print.py
python
shift_indents_to_the_next_tokens
(tokens: List[Token])
During the parsing/validation phase, it's useful to have separate tokens for "indent" chunks, but during pretty printing, we like to attach an `.indent` field to the substantive node, whether it's an HTML tag or template directive or whatever.
During the parsing/validation phase, it's useful to have separate tokens for "indent" chunks, but during pretty printing, we like to attach an `.indent` field to the substantive node, whether it's an HTML tag or template directive or whatever.
[ "During", "the", "parsing", "/", "validation", "phase", "it", "s", "useful", "to", "have", "separate", "tokens", "for", "indent", "chunks", "but", "during", "pretty", "printing", "we", "like", "to", "attach", "an", ".", "indent", "field", "to", "the", "substantive", "node", "whether", "it", "s", "an", "HTML", "tag", "or", "template", "directive", "or", "whatever", "." ]
def shift_indents_to_the_next_tokens(tokens: List[Token]) -> None: """ During the parsing/validation phase, it's useful to have separate tokens for "indent" chunks, but during pretty printing, we like to attach an `.indent` field to the substantive node, whether it's an HTML tag or template directive or whatever. """ tokens[0].indent = "" for i, token in enumerate(tokens[:-1]): next_token = tokens[i + 1] if token.kind == "indent": next_token.indent = token.s token.new_s = "" if token.kind == "newline" and next_token.kind != "indent": next_token.indent = ""
[ "def", "shift_indents_to_the_next_tokens", "(", "tokens", ":", "List", "[", "Token", "]", ")", "->", "None", ":", "tokens", "[", "0", "]", ".", "indent", "=", "\"\"", "for", "i", ",", "token", "in", "enumerate", "(", "tokens", "[", ":", "-", "1", "]", ")", ":", "next_token", "=", "tokens", "[", "i", "+", "1", "]", "if", "token", ".", "kind", "==", "\"indent\"", ":", "next_token", ".", "indent", "=", "token", ".", "s", "token", ".", "new_s", "=", "\"\"", "if", "token", ".", "kind", "==", "\"newline\"", "and", "next_token", ".", "kind", "!=", "\"indent\"", ":", "next_token", ".", "indent", "=", "\"\"" ]
https://github.com/zulip/zulip/blob/19f891968de50d43920af63526c823bdd233cdee/tools/lib/pretty_print.py#L9-L26
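A minimal sketch with a stand-in Token class (the real one lives elsewhere in zulip's tools/lib and carries more fields); it shows an indent token being folded into its successor:
from typing import Optional

class Token:
    def __init__(self, kind: str, s: str) -> None:
        self.kind = kind
        self.s = s
        self.new_s = s
        self.indent: Optional[str] = None

tokens = [Token('indent', '    '), Token('html_start', '<p>'), Token('newline', '\n')]
shift_indents_to_the_next_tokens(tokens)
assert tokens[1].indent == '    ' and tokens[0].new_s == ''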
mozman/ezdxf
59d0fc2ea63f5cf82293428f5931da7e9f9718e9
src/ezdxf/render/mesh.py
python
MeshTransformer.translate
(self, dx: float = 0, dy: float = 0, dz: float = 0)
return self
Translate mesh inplace. Args: dx: translation in x-axis dy: translation in y-axis dz: translation in z-axis
Translate mesh inplace.
[ "Translate", "mesh", "inplace", "." ]
def translate(self, dx: float = 0, dy: float = 0, dz: float = 0): """Translate mesh inplace. Args: dx: translation in x-axis dy: translation in y-axis dz: translation in z-axis """ if isinstance(dx, (float, int)): t = Vec3(dx, dy, dz) else: t = Vec3(dx) self.vertices = [t + v for v in self.vertices] return self
[ "def", "translate", "(", "self", ",", "dx", ":", "float", "=", "0", ",", "dy", ":", "float", "=", "0", ",", "dz", ":", "float", "=", "0", ")", ":", "if", "isinstance", "(", "dx", ",", "(", "float", ",", "int", ")", ")", ":", "t", "=", "Vec3", "(", "dx", ",", "dy", ",", "dz", ")", "else", ":", "t", "=", "Vec3", "(", "dx", ")", "self", ".", "vertices", "=", "[", "t", "+", "v", "for", "v", "in", "self", ".", "vertices", "]", "return", "self" ]
https://github.com/mozman/ezdxf/blob/59d0fc2ea63f5cf82293428f5931da7e9f9718e9/src/ezdxf/render/mesh.py#L357-L371
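Because translate returns self, calls chain; a hedged usage sketch assuming an existing ezdxf MeshTransformer instance named mesh:
mesh.translate(10, 0, 5)                     # shift every vertex by (10, 0, 5)
mesh.translate((1, 2, 3)).translate(dz=4)    # Vec3-compatible argument, then a chained call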
SanPen/GridCal
d3f4566d2d72c11c7e910c9d162538ef0e60df31
src/GridCal/Engine/Simulations/PowerFlow/time_series_clustring_driver.py
python
TimeSeriesClustering.__init__
(self, grid: MultiCircuit, options: PowerFlowOptions, opf_time_series_results=None, start_=0, end_=None, cluster_number=10)
TimeSeriesClustering constructor @param grid: MultiCircuit instance @param options: PowerFlowOptions instance
TimeSeriesClustering constructor
[ "TimeSeriesClustering", "constructor" ]
def __init__(self, grid: MultiCircuit, options: PowerFlowOptions, opf_time_series_results=None, start_=0, end_=None, cluster_number=10): """ TimeSeriesClustering constructor @param grid: MultiCircuit instance @param options: PowerFlowOptions instance """ TimeSeries.__init__(self, grid=grid, options=options, opf_time_series_results=opf_time_series_results, start_=start_, end_=end_) self.cluster_number = cluster_number self.sampled_time_idx = list() self.sampled_probabilities = list()
[ "def", "__init__", "(", "self", ",", "grid", ":", "MultiCircuit", ",", "options", ":", "PowerFlowOptions", ",", "opf_time_series_results", "=", "None", ",", "start_", "=", "0", ",", "end_", "=", "None", ",", "cluster_number", "=", "10", ")", ":", "TimeSeries", ".", "__init__", "(", "self", ",", "grid", "=", "grid", ",", "options", "=", "options", ",", "opf_time_series_results", "=", "opf_time_series_results", ",", "start_", "=", "start_", ",", "end_", "=", "end_", ")", "self", ".", "cluster_number", "=", "cluster_number", "self", ".", "sampled_time_idx", "=", "list", "(", ")", "self", ".", "sampled_probabilities", "=", "list", "(", ")" ]
https://github.com/SanPen/GridCal/blob/d3f4566d2d72c11c7e910c9d162538ef0e60df31/src/GridCal/Engine/Simulations/PowerFlow/time_series_clustring_driver.py#L39-L52
pyqt/examples
843bb982917cecb2350b5f6d7f42c9b7fb142ec1
src/pyqt-official/widgets/calculator.py
python
Calculator.createButton
(self, text, member)
return button
[]
def createButton(self, text, member): button = Button(text) button.clicked.connect(member) return button
[ "def", "createButton", "(", "self", ",", "text", ",", "member", ")", ":", "button", "=", "Button", "(", "text", ")", "button", ".", "clicked", ".", "connect", "(", "member", ")", "return", "button" ]
https://github.com/pyqt/examples/blob/843bb982917cecb2350b5f6d7f42c9b7fb142ec1/src/pyqt-official/widgets/calculator.py#L325-L328
assemblerflow/flowcraft
66cef255589238b1c9afe6e80b6917e1225915e7
flowcraft/generator/pipeline_parser.py
python
insanity_checks
(pipeline_str)
Wrapper that performs all sanity checks on the pipeline string Parameters ---------- pipeline_str : str String with the pipeline definition
Wrapper that performs all sanity checks on the pipeline string
[ "Wrapper", "that", "performs", "all", "sanity", "checks", "on", "the", "pipeline", "string" ]
def insanity_checks(pipeline_str): """Wrapper that performs all sanity checks on the pipeline string Parameters ---------- pipeline_str : str String with the pipeline definition """ # Gets rid of all spaces in string p_string = pipeline_str.replace(" ", "").strip() # some of the check functions use the pipeline_str as the user provided but # the majority uses the parsed p_string. checks = [ [p_string, [ empty_tasks, brackets_but_no_lanes, brackets_insanity_check, lane_char_insanity_check, final_char_insanity_check, fork_procs_insanity_check, start_proc_insanity_check, late_proc_insanity_check ]], [pipeline_str, [ inner_fork_insanity_checks ]] ] # executes sanity checks in pipeline string before parsing it. for param, func_list in checks: for func in func_list: func(param)
[ "def", "insanity_checks", "(", "pipeline_str", ")", ":", "# Gets rid of all spaces in string", "p_string", "=", "pipeline_str", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "strip", "(", ")", "# some of the check functions use the pipeline_str as the user provided but", "# the majority uses the parsed p_string.", "checks", "=", "[", "[", "p_string", ",", "[", "empty_tasks", ",", "brackets_but_no_lanes", ",", "brackets_insanity_check", ",", "lane_char_insanity_check", ",", "final_char_insanity_check", ",", "fork_procs_insanity_check", ",", "start_proc_insanity_check", ",", "late_proc_insanity_check", "]", "]", ",", "[", "pipeline_str", ",", "[", "inner_fork_insanity_checks", "]", "]", "]", "# executes sanity checks in pipeline string before parsing it.", "for", "param", ",", "func_list", "in", "checks", ":", "for", "func", "in", "func_list", ":", "func", "(", "param", ")" ]
https://github.com/assemblerflow/flowcraft/blob/66cef255589238b1c9afe6e80b6917e1225915e7/flowcraft/generator/pipeline_parser.py#L305-L338
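The table-driven dispatch above pairs each input variant with the checks that need it; a self-contained sketch of the same pattern with two hypothetical checks:

def check_not_empty(s):
    if not s:
        raise ValueError("empty pipeline string")

def check_balanced_parens(s):
    if s.count("(") != s.count(")"):
        raise ValueError("unbalanced parentheses")

def run_checks(raw):
    stripped = raw.replace(" ", "").strip()  # parsed form, like p_string above
    checks = [
        [stripped, [check_not_empty, check_balanced_parens]],
        [raw, []],  # checks needing the user's original string go here
    ]
    for param, func_list in checks:
        for func in func_list:
            func(param)

run_checks("fastqc (trimmomatic | skesa)")  # passes silently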
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/agca/modules.py
python
FreeModule.is_submodule
(self, other)
return False
Returns True if ``other`` is a submodule of ``self``. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> M = F.submodule([2, x]) >>> F.is_submodule(F) True >>> F.is_submodule(M) True >>> M.is_submodule(F) False
Returns True if ``other`` is a submodule of ``self``.
[ "Returns", "True", "if", "other", "is", "a", "submodule", "of", "self", "." ]
def is_submodule(self, other): """ Returns True if ``other`` is a submodule of ``self``. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> M = F.submodule([2, x]) >>> F.is_submodule(F) True >>> F.is_submodule(M) True >>> M.is_submodule(F) False """ if isinstance(other, SubModule): return other.container == self if isinstance(other, FreeModule): return other.ring == self.ring and other.rank == self.rank return False
[ "def", "is_submodule", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "SubModule", ")", ":", "return", "other", ".", "container", "==", "self", "if", "isinstance", "(", "other", ",", "FreeModule", ")", ":", "return", "other", ".", "ring", "==", "self", ".", "ring", "and", "other", ".", "rank", "==", "self", ".", "rank", "return", "False" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/polys/agca/modules.py#L306-L325
crits/crits_services
c7abf91f1865d913cffad4b966599da204f8ae43
virustotal_service/__init__.py
python
VirusTotalService._process_pcap
(self, pcap, scandate)
Add Pcap file to CRITs. Args: pcap (binary): pcap data scandate (str): scan date from when pcap was collected TODO: Add an error check
Add Pcap file to CRITs.
[ "Add", "Pcap", "file", "to", "CRITs", "." ]
def _process_pcap(self, pcap, scandate): """ Add Pcap file to CRITs. Args: pcap (binary): pcap data scandate (str): scan date from when pcap was collected TODO: Add an error check """ self._info("Adding PCAP and creating relationship to %s" % (str(self.obj.id))) self._notify() h = md5(pcap).hexdigest() result = handle_pcap_file("%s.pcap" % h, pcap, self.obj.source, user=self.current_task.user, description='Created %s' % (scandate), related_id=str(self.obj.id), related_type="Sample", method=self.name, reference=None, relationship=RelationshipTypes.RELATED_TO) self._add_result("pcap added", h, {'md5': h})
[ "def", "_process_pcap", "(", "self", ",", "pcap", ",", "scandate", ")", ":", "self", ".", "_info", "(", "\"Adding PCAP and creating relationship to %s\"", "%", "(", "str", "(", "self", ".", "obj", ".", "id", ")", ")", ")", "self", ".", "_notify", "(", ")", "h", "=", "md5", "(", "pcap", ")", ".", "hexdigest", "(", ")", "result", "=", "handle_pcap_file", "(", "\"%s.pcap\"", "%", "h", ",", "pcap", ",", "self", ".", "obj", ".", "source", ",", "user", "=", "self", ".", "current_task", ".", "user", ",", "description", "=", "'Created %s'", "%", "(", "scandate", ")", ",", "related_id", "=", "str", "(", "self", ".", "obj", ".", "id", ")", ",", "related_type", "=", "\"Sample\"", ",", "method", "=", "self", ".", "name", ",", "reference", "=", "None", ",", "relationship", "=", "RelationshipTypes", ".", "RELATED_TO", ")", "self", ".", "_add_result", "(", "\"pcap added\"", ",", "h", ",", "{", "'md5'", ":", "h", "}", ")" ]
https://github.com/crits/crits_services/blob/c7abf91f1865d913cffad4b966599da204f8ae43/virustotal_service/__init__.py#L776-L800
kayousterhout/trace-analysis
a20a1ac8dc2f93d881b22095ab43d2209923e52b
upload.py
python
VersionControlSystem.IsImage
(self, filename)
return mimetype.startswith("image/")
Returns true if the filename has an image extension.
Returns true if the filename has an image extension.
[ "Returns", "true", "if", "the", "filename", "has", "an", "image", "extension", "." ]
def IsImage(self, filename): """Returns true if the filename has an image extension.""" mimetype = mimetypes.guess_type(filename)[0] if not mimetype: return False return mimetype.startswith("image/")
[ "def", "IsImage", "(", "self", ",", "filename", ")", ":", "mimetype", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "[", "0", "]", "if", "not", "mimetype", ":", "return", "False", "return", "mimetype", ".", "startswith", "(", "\"image/\"", ")" ]
https://github.com/kayousterhout/trace-analysis/blob/a20a1ac8dc2f93d881b22095ab43d2209923e52b/upload.py#L940-L945
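The check leans entirely on the standard library's extension table, so its behavior is easy to probe:

import mimetypes

print(mimetypes.guess_type("photo.png")[0])  # image/png  -> IsImage returns True
print(mimetypes.guess_type("notes.txt")[0])  # text/plain -> IsImage returns False
print(mimetypes.guess_type("README")[0])     # None       -> IsImage returns False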
marcosfede/algorithms
1ee7c815f9d556c9cef4d4b0d21ee3a409d21629
adventofcode/2019/d23/d23.py
python
VM.__init__
(self, program)
[]
def __init__(self, program): self.pointer = 0 self.program = defaultdict(int, enumerate(program)) self.input = [] self.output = [] self.done = False self.base = 0 self.op_params = { 1: 3, 2: 3, 3: 1, 4: 1, 5: 2, 6: 2, 7: 3, 8: 3, 9: 1 }
[ "def", "__init__", "(", "self", ",", "program", ")", ":", "self", ".", "pointer", "=", "0", "self", ".", "program", "=", "defaultdict", "(", "int", ",", "enumerate", "(", "program", ")", ")", "self", ".", "input", "=", "[", "]", "self", ".", "output", "=", "[", "]", "self", ".", "done", "=", "False", "self", ".", "base", "=", "0", "self", ".", "op_params", "=", "{", "1", ":", "3", ",", "2", ":", "3", ",", "3", ":", "1", ",", "4", ":", "1", ",", "5", ":", "2", ",", "6", ":", "2", ",", "7", ":", "3", ",", "8", ":", "3", ",", "9", ":", "1", "}" ]
https://github.com/marcosfede/algorithms/blob/1ee7c815f9d556c9cef4d4b0d21ee3a409d21629/adventofcode/2019/d23/d23.py#L10-L20
PacktPublishing/Hands-On-Intelligent-Agents-with-OpenAI-Gym
14ab6fc82018e48de130e87671cca6c57456d1a5
ch7/carla-gym/carla_gym/envs/carla/client.py
python
CarlaClient.send_control
(self, *args, **kwargs)
Send the VehicleControl to be applied this frame. If synchronous mode was requested, the server will pause the simulation until this message is received.
Send the VehicleControl to be applied this frame.
[ "Send", "the", "VehicleControl", "to", "be", "applied", "this", "frame", "." ]
def send_control(self, *args, **kwargs): """ Send the VehicleControl to be applied this frame. If synchronous mode was requested, the server will pause the simulation until this message is received. """ if isinstance(args[0] if args else None, carla_protocol.Control): pb_message = args[0] else: pb_message = carla_protocol.Control() pb_message.steer = kwargs.get('steer', 0.0) pb_message.throttle = kwargs.get('throttle', 0.0) pb_message.brake = kwargs.get('brake', 0.0) pb_message.hand_brake = kwargs.get('hand_brake', False) pb_message.reverse = kwargs.get('reverse', False) self._control_client.write(pb_message.SerializeToString())
[ "def", "send_control", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "args", "[", "0", "]", "if", "args", "else", "None", ",", "carla_protocol", ".", "Control", ")", ":", "pb_message", "=", "args", "[", "0", "]", "else", ":", "pb_message", "=", "carla_protocol", ".", "Control", "(", ")", "pb_message", ".", "steer", "=", "kwargs", ".", "get", "(", "'steer'", ",", "0.0", ")", "pb_message", ".", "throttle", "=", "kwargs", ".", "get", "(", "'throttle'", ",", "0.0", ")", "pb_message", ".", "brake", "=", "kwargs", ".", "get", "(", "'brake'", ",", "0.0", ")", "pb_message", ".", "hand_brake", "=", "kwargs", ".", "get", "(", "'hand_brake'", ",", "False", ")", "pb_message", ".", "reverse", "=", "kwargs", ".", "get", "(", "'reverse'", ",", "False", ")", "self", ".", "_control_client", ".", "write", "(", "pb_message", ".", "SerializeToString", "(", ")", ")" ]
https://github.com/PacktPublishing/Hands-On-Intelligent-Agents-with-OpenAI-Gym/blob/14ab6fc82018e48de130e87671cca6c57456d1a5/ch7/carla-gym/carla_gym/envs/carla/client.py#L129-L145
gentoo/portage
e5be73709b1a42b40380fd336f9381452b01a723
lib/_emerge/EbuildBuild.py
python
EbuildBuild._buildpkg_exit
(self, packager)
Release the build dir lock when there is a failure or when in buildpkgonly mode. Otherwise, the lock will be released when merge() is called.
Release the build dir lock when there is a failure or when in buildpkgonly mode. Otherwise, the lock will be released when merge() is called.
[ "Release", "the", "build", "dir", "lock", "when", "there", "is", "a", "failure", "or", "when", "in", "buildpkgonly", "mode", ".", "Otherwise", "the", "lock", "will", "be", "released", "when", "merge", "()", "is", "called", "." ]
def _buildpkg_exit(self, packager): """ Release the build dir lock when there is a failure or when in buildpkgonly mode. Otherwise, the lock will be released when merge() is called. """ if self._default_exit(packager) != os.EX_OK: self._async_unlock_builddir(returncode=self.returncode) return if self.opts.buildpkgonly: phase = "success_hooks" success_hooks = MiscFunctionsProcess( background=self.background, commands=[phase], phase=phase, scheduler=self.scheduler, settings=self.settings, ) self._start_task(success_hooks, self._buildpkgonly_success_hook_exit) return # Continue holding the builddir lock until # after the package has been installed. self._current_task = None self.returncode = packager.returncode self.wait()
[ "def", "_buildpkg_exit", "(", "self", ",", "packager", ")", ":", "if", "self", ".", "_default_exit", "(", "packager", ")", "!=", "os", ".", "EX_OK", ":", "self", ".", "_async_unlock_builddir", "(", "returncode", "=", "self", ".", "returncode", ")", "return", "if", "self", ".", "opts", ".", "buildpkgonly", ":", "phase", "=", "\"success_hooks\"", "success_hooks", "=", "MiscFunctionsProcess", "(", "background", "=", "self", ".", "background", ",", "commands", "=", "[", "phase", "]", ",", "phase", "=", "phase", ",", "scheduler", "=", "self", ".", "scheduler", ",", "settings", "=", "self", ".", "settings", ",", ")", "self", ".", "_start_task", "(", "success_hooks", ",", "self", ".", "_buildpkgonly_success_hook_exit", ")", "return", "# Continue holding the builddir lock until", "# after the package has been installed.", "self", ".", "_current_task", "=", "None", "self", ".", "returncode", "=", "packager", ".", "returncode", "self", ".", "wait", "(", ")" ]
https://github.com/gentoo/portage/blob/e5be73709b1a42b40380fd336f9381452b01a723/lib/_emerge/EbuildBuild.py#L522-L549
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/gsim/bradley_2013.py
python
_get_SRF_sigma
(imt_per)
return srf
Table 8 and equation 19 of 2013 report. NB change in notation, 2013 report calls this term 'sigma_t' but it is referred to here as sigma. Note that Table 8 is identical to Table 7 in the 2013 report.
Table 8 and equation 19 of 2013 report. NB change in notation, 2013 report calls this term 'sigma_t' but it is referred to here as sigma. Note that Table 8 is identical to Table 7 in the 2013 report.
[ "Table", "8", "and", "equation", "19", "of", "2013", "report", ".", "NB", "change", "in", "notation", "2013", "report", "calls", "this", "term", "sigma_t", "but", "it", "is", "referred", "to", "here", "as", "sigma", ".", "Note", "that", "Table", "8", "is", "identical", "to", "Table", "7", "in", "the", "2013", "report", "." ]
def _get_SRF_sigma(imt_per): """ Table 8 and equation 19 of 2013 report. NB change in notation, 2013 report calls this term 'sigma_t' but it is referred to here as sigma. Note that Table 8 is identical to Table 7 in the 2013 report. """ if imt_per < 0.6: srf = 0.8 elif 0.6 <= imt_per < 1: srf = _interp_function(0.7, 0.8, 1, 0.6, imt_per) elif 1 <= imt_per <= 10: srf = _interp_function(0.6, 0.7, 10, 1, imt_per) else: srf = 1 return srf
[ "def", "_get_SRF_sigma", "(", "imt_per", ")", ":", "if", "imt_per", "<", "0.6", ":", "srf", "=", "0.8", "elif", "0.6", "<=", "imt_per", "<", "1", ":", "srf", "=", "_interp_function", "(", "0.7", ",", "0.8", ",", "1", ",", "0.6", ",", "imt_per", ")", "elif", "1", "<=", "imt_per", "<=", "10", ":", "srf", "=", "_interp_function", "(", "0.6", ",", "0.7", ",", "10", ",", "1", ",", "imt_per", ")", "else", ":", "srf", "=", "1", "return", "srf" ]
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/gsim/bradley_2013.py#L110-L126
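To see the piecewise shape, here is a self-contained sketch in which a plain linear interpolation stands in for the module's _interp_function helper (an assumption; only that helper's call signature is visible above):

def lerp(y_hi, y_lo, x_hi, x_lo, x):
    # straight line through (x_lo, y_lo) and (x_hi, y_hi)
    return y_lo + (y_hi - y_lo) * (x - x_lo) / (x_hi - x_lo)

for period in (0.3, 0.8, 5.5, 12.0):
    if period < 0.6:
        srf = 0.8
    elif period < 1:
        srf = lerp(0.7, 0.8, 1, 0.6, period)
    elif period <= 10:
        srf = lerp(0.6, 0.7, 10, 1, period)
    else:
        srf = 1
    print(period, round(srf, 3))  # 0.8, 0.75, 0.65, 1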
Ericsson/codechecker
c4e43f62dc3acbf71d3109b337db7c97f7852f43
analyzer/tools/statistics_collector/codechecker_statistics_collector/collectors/special_return_value.py
python
SpecialReturnValueCollector.process_line
(self, line)
Match regex on the line.
Match regex on the line.
[ "Match", "regex", "on", "the", "line", "." ]
def process_line(self, line): """ Match regex on the line. """ m = self.special_ret_val_regexp.match(line) if m: func = m.group(1) ret_negative = m.group(2) ret_null = m.group(3) self.stats['total'][func] += 1 self.stats['nof_negative'][func] += int(ret_negative) self.stats['nof_null'][func] += int(ret_null)
[ "def", "process_line", "(", "self", ",", "line", ")", ":", "m", "=", "self", ".", "special_ret_val_regexp", ".", "match", "(", "line", ")", "if", "m", ":", "func", "=", "m", ".", "group", "(", "1", ")", "ret_negative", "=", "m", ".", "group", "(", "2", ")", "ret_null", "=", "m", ".", "group", "(", "3", ")", "self", ".", "stats", "[", "'total'", "]", "[", "func", "]", "+=", "1", "self", ".", "stats", "[", "'nof_negative'", "]", "[", "func", "]", "+=", "int", "(", "ret_negative", ")", "self", ".", "stats", "[", "'nof_null'", "]", "[", "func", "]", "+=", "int", "(", "ret_null", ")" ]
https://github.com/Ericsson/codechecker/blob/c4e43f62dc3acbf71d3109b337db7c97f7852f43/analyzer/tools/statistics_collector/codechecker_statistics_collector/collectors/special_return_value.py#L68-L78
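The regex itself is defined elsewhere in the collector, so this is a sketch of the same counting pattern with a hypothetical "func negative null" line format standing in for the real one:

import re
from collections import defaultdict

pattern = re.compile(r"(\w+) (\d+) (\d+)")  # hypothetical stand-in regex
stats = {"total": defaultdict(int),
         "nof_negative": defaultdict(int),
         "nof_null": defaultdict(int)}

for line in ["malloc 0 1", "malloc 0 1", "readdir 1 0"]:
    m = pattern.match(line)
    if m:
        func = m.group(1)
        stats["total"][func] += 1
        stats["nof_negative"][func] += int(m.group(2))
        stats["nof_null"][func] += int(m.group(3))

print(dict(stats["nof_null"]))  # {'malloc': 2, 'readdir': 0}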
TypeError/secure
04dd035e560583baeba06f5714f273b542c07767
secure/headers.py
python
StrictTransportSecurity.include_subdomains
(self)
return self
Include subdomains in HSTS policy [Optional] :return: [description] :rtype: [type]
Include subdomains in HSTS policy [Optional]
[ "Include", "subdomains", "in", "HSTS", "policy", "[", "Optional", "]" ]
def include_subdomains(self) -> "StrictTransportSecurity": """Include subdomains in HSTS policy [Optional] :return: [description] :rtype: [type] """ self._build("includeSubDomains") return self
[ "def", "include_subdomains", "(", "self", ")", "->", "\"StrictTransportSecurity\"", ":", "self", ".", "_build", "(", "\"includeSubDomains\"", ")", "return", "self" ]
https://github.com/TypeError/secure/blob/04dd035e560583baeba06f5714f273b542c07767/secure/headers.py#L665-L672
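A hedged sketch of chaining the fluent setters; max_age is assumed to be another setter on the same class, and the top-level import path is an assumption as well:

from secure import Secure, StrictTransportSecurity

hsts = StrictTransportSecurity().max_age(31536000).include_subdomains()
secure_headers = Secure(hsts=hsts)  # bundle the policy into a header set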
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/polys/factortools.py
python
dup_zz_i_factor
(f, K0)
return coeff, factors
Factor univariate polynomials into irreducibles in `ZZ_I[x]`.
Factor univariate polynomials into irreducibles in `ZZ_I[x]`.
[ "Factor", "univariate", "polynomials", "into", "irreducibles", "in", "ZZ_I", "[", "x", "]", "." ]
def dup_zz_i_factor(f, K0): """Factor univariate polynomials into irreducibles in `ZZ_I[x]`. """ # First factor in QQ_I K1 = K0.get_field() f = dup_convert(f, K0, K1) coeff, factors = dup_qq_i_factor(f, K1) new_factors = [] for fac, i in factors: # Extract content fac_denom, fac_num = dup_clear_denoms(fac, K1) fac_num_ZZ_I = dup_convert(fac_num, K1, K0) content, fac_prim = dmp_ground_primitive(fac_num_ZZ_I, 0, K1) coeff = (coeff * content ** i) // fac_denom ** i new_factors.append((fac_prim, i)) factors = new_factors coeff = K0.convert(coeff, K1) return coeff, factors
[ "def", "dup_zz_i_factor", "(", "f", ",", "K0", ")", ":", "# First factor in QQ_I", "K1", "=", "K0", ".", "get_field", "(", ")", "f", "=", "dup_convert", "(", "f", ",", "K0", ",", "K1", ")", "coeff", ",", "factors", "=", "dup_qq_i_factor", "(", "f", ",", "K1", ")", "new_factors", "=", "[", "]", "for", "fac", ",", "i", "in", "factors", ":", "# Extract content", "fac_denom", ",", "fac_num", "=", "dup_clear_denoms", "(", "fac", ",", "K1", ")", "fac_num_ZZ_I", "=", "dup_convert", "(", "fac_num", ",", "K1", ",", "K0", ")", "content", ",", "fac_prim", "=", "dmp_ground_primitive", "(", "fac_num_ZZ_I", ",", "0", ",", "K1", ")", "coeff", "=", "(", "coeff", "*", "content", "**", "i", ")", "//", "fac_denom", "**", "i", "new_factors", ".", "append", "(", "(", "fac_prim", ",", "i", ")", ")", "factors", "=", "new_factors", "coeff", "=", "K0", ".", "convert", "(", "coeff", ",", "K1", ")", "return", "coeff", ",", "factors" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/polys/factortools.py#L1168-L1187
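This low-level routine sits behind sympy's public factoring interface; the same Gaussian-integer factorization is reachable with factor(..., gaussian=True):

from sympy import factor, I
from sympy.abc import x

print(factor(x**2 + 1, gaussian=True))  # (x - I)*(x + I)
print(factor(x**2 + 4, gaussian=True))  # (x - 2*I)*(x + 2*I)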
liaopeiyuan/ml-arsenal-public
f8938ce3cb58b35fc7cc20d096c39a85ec9780b2
projects/Doodle/YourVenn_code/code/net/layer/sync_batchnorm/batchnorm.py
python
_SynchronizedBatchNorm._data_parallel_master
(self, intermediates)
return outputs
Reduce the sum and square-sum, compute the statistics, and broadcast it.
Reduce the sum and square-sum, compute the statistics, and broadcast it.
[ "Reduce", "the", "sum", "and", "square", "-", "sum", "compute", "the", "statistics", "and", "broadcast", "it", "." ]
def _data_parallel_master(self, intermediates): """Reduce the sum and square-sum, compute the statistics, and broadcast it.""" # Always using same "device order" makes the ReduceAdd operation faster. # Thanks to:: Tete Xiao (http://tetexiao.com/) intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device()) to_reduce = [i[1][:2] for i in intermediates] to_reduce = [j for i in to_reduce for j in i] # flatten target_gpus = [i[1].sum.get_device() for i in intermediates] sum_size = sum([i[1].sum_size for i in intermediates]) sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce) mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size) broadcasted = Broadcast.apply(target_gpus, mean, inv_std) outputs = [] for i, rec in enumerate(intermediates): outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2]))) return outputs
[ "def", "_data_parallel_master", "(", "self", ",", "intermediates", ")", ":", "# Always using same \"device order\" makes the ReduceAdd operation faster.", "# Thanks to:: Tete Xiao (http://tetexiao.com/)", "intermediates", "=", "sorted", "(", "intermediates", ",", "key", "=", "lambda", "i", ":", "i", "[", "1", "]", ".", "sum", ".", "get_device", "(", ")", ")", "to_reduce", "=", "[", "i", "[", "1", "]", "[", ":", "2", "]", "for", "i", "in", "intermediates", "]", "to_reduce", "=", "[", "j", "for", "i", "in", "to_reduce", "for", "j", "in", "i", "]", "# flatten", "target_gpus", "=", "[", "i", "[", "1", "]", ".", "sum", ".", "get_device", "(", ")", "for", "i", "in", "intermediates", "]", "sum_size", "=", "sum", "(", "[", "i", "[", "1", "]", ".", "sum_size", "for", "i", "in", "intermediates", "]", ")", "sum_", ",", "ssum", "=", "ReduceAddCoalesced", ".", "apply", "(", "target_gpus", "[", "0", "]", ",", "2", ",", "*", "to_reduce", ")", "mean", ",", "inv_std", "=", "self", ".", "_compute_mean_std", "(", "sum_", ",", "ssum", ",", "sum_size", ")", "broadcasted", "=", "Broadcast", ".", "apply", "(", "target_gpus", ",", "mean", ",", "inv_std", ")", "outputs", "=", "[", "]", "for", "i", ",", "rec", "in", "enumerate", "(", "intermediates", ")", ":", "outputs", ".", "append", "(", "(", "rec", "[", "0", "]", ",", "_MasterMessage", "(", "*", "broadcasted", "[", "i", "*", "2", ":", "i", "*", "2", "+", "2", "]", ")", ")", ")", "return", "outputs" ]
https://github.com/liaopeiyuan/ml-arsenal-public/blob/f8938ce3cb58b35fc7cc20d096c39a85ec9780b2/projects/Doodle/YourVenn_code/code/net/layer/sync_batchnorm/batchnorm.py#L95-L116
vstinner/python-ptrace
a715d0f9bef4060022bfb6d25e25e148b2bd5f54
ptrace/linux_proc.py
python
readProcessStatm
(pid)
return statm
Read the process memory status ('statm') as a list of integers. Values are in bytes (and not in pages).
Read the process memory status ('statm') as a list of integers. Values are in bytes (and not in pages).
[ "Read", "the", "process", "memory", "status", "(", "statm", ")", "as", "a", "list", "of", "integers", ".", "Values", "are", "in", "bytes", "(", "and", "not", "in", "pages", ")", "." ]
def readProcessStatm(pid): """ Read the process memory status ('statm') as a list of integers. Values are in bytes (and not in pages). """ statm = readProcessProc(pid, 'statm') statm = [int(item) * PAGE_SIZE for item in statm.split()] return statm
[ "def", "readProcessStatm", "(", "pid", ")", ":", "statm", "=", "readProcessProc", "(", "pid", ",", "'statm'", ")", "statm", "=", "[", "int", "(", "item", ")", "*", "PAGE_SIZE", "for", "item", "in", "statm", ".", "split", "(", ")", "]", "return", "statm" ]
https://github.com/vstinner/python-ptrace/blob/a715d0f9bef4060022bfb6d25e25e148b2bd5f54/ptrace/linux_proc.py#L110-L117
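Assuming the readProcessStatm definition above is in scope (Linux only), the returned fields follow the /proc statm order: size, resident, shared, text, lib, data, dirty:

import os

statm = readProcessStatm(os.getpid())
print("virtual size: %d bytes, resident: %d bytes" % (statm[0], statm[1]))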
Scalsol/mega.pytorch
a6aa6e0537b82d70da94228100a51e6a53d98f82
mega_core/modeling/rpn/rpn.py
python
RPNHeadFeatureSingleConv.__init__
(self, cfg, in_channels)
Arguments: cfg : config in_channels (int): number of channels of the input feature
Arguments: cfg : config in_channels (int): number of channels of the input feature
[ "Arguments", ":", "cfg", ":", "config", "in_channels", "(", "int", ")", ":", "number", "of", "channels", "of", "the", "input", "feature" ]
def __init__(self, cfg, in_channels): """ Arguments: cfg : config in_channels (int): number of channels of the input feature """ super(RPNHeadFeatureSingleConv, self).__init__() self.conv = nn.Conv2d( in_channels, in_channels, kernel_size=3, stride=1, padding=1 ) for l in [self.conv]: torch.nn.init.normal_(l.weight, std=0.01) torch.nn.init.constant_(l.bias, 0) self.out_channels = in_channels
[ "def", "__init__", "(", "self", ",", "cfg", ",", "in_channels", ")", ":", "super", "(", "RPNHeadFeatureSingleConv", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "conv", "=", "nn", ".", "Conv2d", "(", "in_channels", ",", "in_channels", ",", "kernel_size", "=", "3", ",", "stride", "=", "1", ",", "padding", "=", "1", ")", "for", "l", "in", "[", "self", ".", "conv", "]", ":", "torch", ".", "nn", ".", "init", ".", "normal_", "(", "l", ".", "weight", ",", "std", "=", "0.01", ")", "torch", ".", "nn", ".", "init", ".", "constant_", "(", "l", ".", "bias", ",", "0", ")", "self", ".", "out_channels", "=", "in_channels" ]
https://github.com/Scalsol/mega.pytorch/blob/a6aa6e0537b82d70da94228100a51e6a53d98f82/mega_core/modeling/rpn/rpn.py#L49-L64
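A hedged smoke-test sketch; cfg is accepted but never read in this __init__, so None suffices here (assumes the class above is importable and torch is installed):

import torch

head = RPNHeadFeatureSingleConv(cfg=None, in_channels=256)
x = torch.randn(2, 256, 32, 32)
print(head.conv(x).shape)  # torch.Size([2, 256, 32, 32]): 3x3 conv, stride 1, pad 1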
1040003585/WebScrapingWithPython
a770fa5b03894076c8c9539b1ffff34424ffc016
ResourceCode/wswp-places-c573d29efa3a/web2py/applications/examples/controllers/cache_examples.py
python
cache_on_disk
()
return dict(time=t, link=A('click to reload', _href=URL(r=request)))
cache the output of the lambda function on disk
cache the output of the lambda function on disk
[ "cache", "the", "output", "of", "the", "lambda", "function", "on", "disk" ]
def cache_on_disk(): """cache the output of the lambda function on disk""" t = cache.disk('time', lambda: time.ctime(), time_expire=5) return dict(time=t, link=A('click to reload', _href=URL(r=request)))
[ "def", "cache_on_disk", "(", ")", ":", "t", "=", "cache", ".", "disk", "(", "'time'", ",", "lambda", ":", "time", ".", "ctime", "(", ")", ",", "time_expire", "=", "5", ")", "return", "dict", "(", "time", "=", "t", ",", "link", "=", "A", "(", "'click to reload'", ",", "_href", "=", "URL", "(", "r", "=", "request", ")", ")", ")" ]
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/ResourceCode/wswp-places-c573d29efa3a/web2py/applications/examples/controllers/cache_examples.py#L12-L16
GoogleCloudPlatform/gsutil
5be882803e76608e2fd29cf8c504ccd1fe0a7746
gslib/utils/system_util.py
python
GetDiskCounters
()
return retdict
Retrieves disk I/O statistics for all disks. Adapted from the psutil module's psutil._pslinux.disk_io_counters: http://code.google.com/p/psutil/source/browse/trunk/psutil/_pslinux.py Originally distributed under a BSD license. Original Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola. Returns: A dictionary containing disk names mapped to the disk counters from /proc/diskstats.
Retrieves disk I/O statistics for all disks.
[ "Retrieves", "disk", "I", "/", "O", "statistics", "for", "all", "disks", "." ]
def GetDiskCounters(): """Retrieves disk I/O statistics for all disks. Adapted from the psutil module's psutil._pslinux.disk_io_counters: http://code.google.com/p/psutil/source/browse/trunk/psutil/_pslinux.py Originally distributed under a BSD license. Original Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola. Returns: A dictionary containing disk names mapped to the disk counters from /proc/diskstats. """ # iostat documentation states that sectors are equivalent with blocks and # have a size of 512 bytes since 2.4 kernels. This value is needed to # calculate the amount of disk I/O in bytes. sector_size = 512 partitions = [] with open('/proc/partitions', 'r') as f: lines = f.readlines()[2:] for line in lines: _, _, _, name = line.split() if name[-1].isdigit(): partitions.append(name) retdict = {} with open('/proc/diskstats', 'r') as f: for line in f: values = line.split()[:11] _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = values if name in partitions: rbytes = int(rbytes) * sector_size wbytes = int(wbytes) * sector_size reads = int(reads) writes = int(writes) rtime = int(rtime) wtime = int(wtime) retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime) return retdict
[ "def", "GetDiskCounters", "(", ")", ":", "# iostat documentation states that sectors are equivalent with blocks and", "# have a size of 512 bytes since 2.4 kernels. This value is needed to", "# calculate the amount of disk I/O in bytes.", "sector_size", "=", "512", "partitions", "=", "[", "]", "with", "open", "(", "'/proc/partitions'", ",", "'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "[", "2", ":", "]", "for", "line", "in", "lines", ":", "_", ",", "_", ",", "_", ",", "name", "=", "line", ".", "split", "(", ")", "if", "name", "[", "-", "1", "]", ".", "isdigit", "(", ")", ":", "partitions", ".", "append", "(", "name", ")", "retdict", "=", "{", "}", "with", "open", "(", "'/proc/diskstats'", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "values", "=", "line", ".", "split", "(", ")", "[", ":", "11", "]", "_", ",", "_", ",", "name", ",", "reads", ",", "_", ",", "rbytes", ",", "rtime", ",", "writes", ",", "_", ",", "wbytes", ",", "wtime", "=", "values", "if", "name", "in", "partitions", ":", "rbytes", "=", "int", "(", "rbytes", ")", "*", "sector_size", "wbytes", "=", "int", "(", "wbytes", ")", "*", "sector_size", "reads", "=", "int", "(", "reads", ")", "writes", "=", "int", "(", "writes", ")", "rtime", "=", "int", "(", "rtime", ")", "wtime", "=", "int", "(", "wtime", ")", "retdict", "[", "name", "]", "=", "(", "reads", ",", "writes", ",", "rbytes", ",", "wbytes", ",", "rtime", ",", "wtime", ")", "return", "retdict" ]
https://github.com/GoogleCloudPlatform/gsutil/blob/5be882803e76608e2fd29cf8c504ccd1fe0a7746/gslib/utils/system_util.py#L121-L160
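A Linux-only usage sketch (the function reads /proc/partitions and /proc/diskstats directly, so it needs no extra dependencies):

for disk, (reads, writes, rbytes, wbytes, rtime, wtime) in GetDiskCounters().items():
    print("%s: %d reads (%d bytes), %d writes (%d bytes)"
          % (disk, reads, rbytes, writes, wbytes))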
wenwei202/terngrad
ec4f75e9a3a1e1c4b2e6494d830fbdfdd2e03ddc
slim/nets/vgg.py
python
vgg_16
(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_16', fc_conv_padding='VALID')
Oxford Net VGG 16-Layers version D Example. Note: All the fully_connected layers have been transformed to conv2d layers. To use in classification mode, resize input to 224x224. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_classes: number of predicted classes. is_training: whether or not the model is being trained. dropout_keep_prob: the probability that activations are kept in the dropout layers during training. spatial_squeeze: whether or not to squeeze the spatial dimensions of the outputs. Useful to remove unnecessary dimensions for classification. scope: Optional scope for the variables. fc_conv_padding: the type of padding to use for the fully connected layer that is implemented as a convolutional layer. Use 'SAME' padding if you are applying the network in a fully convolutional manner and want to get a prediction map downsampled by a factor of 32 as an output. Otherwise, the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. Returns: the last op containing the log predictions and end_points dict.
Oxford Net VGG 16-Layers version D Example.
[ "Oxford", "Net", "VGG", "16", "-", "Layers", "version", "D", "Example", "." ]
def vgg_16(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_16', fc_conv_padding='VALID'): """Oxford Net VGG 16-Layers version D Example. Note: All the fully_connected layers have been transformed to conv2d layers. To use in classification mode, resize input to 224x224. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_classes: number of predicted classes. is_training: whether or not the model is being trained. dropout_keep_prob: the probability that activations are kept in the dropout layers during training. spatial_squeeze: whether or not to squeeze the spatial dimensions of the outputs. Useful to remove unnecessary dimensions for classification. scope: Optional scope for the variables. fc_conv_padding: the type of padding to use for the fully connected layer that is implemented as a convolutional layer. Use 'SAME' padding if you are applying the network in a fully convolutional manner and want to get a prediction map downsampled by a factor of 32 as an output. Otherwise, the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. Returns: the last op containing the log predictions and end_points dict. """ with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc: end_points_collection = sc.name + '_end_points' # Collect outputs for conv2d, fully_connected and max_pool2d. with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], outputs_collections=end_points_collection): net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1') net = slim.max_pool2d(net, [2, 2], scope='pool1') net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2') net = slim.max_pool2d(net, [2, 2], scope='pool2') net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3') net = slim.max_pool2d(net, [2, 2], scope='pool3') net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4') net = slim.max_pool2d(net, [2, 2], scope='pool4') net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5') net = slim.max_pool2d(net, [2, 2], scope='pool5') # Use conv2d instead of fully_connected layers. net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6') net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6') net = slim.conv2d(net, 4096, [1, 1], scope='fc7') net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7') net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8') # Convert end_points_collection into a end_point dict. end_points = slim.utils.convert_collection_to_dict(end_points_collection) if spatial_squeeze: net = tf.squeeze(net, [1, 2], name='fc8/squeezed') end_points[sc.name + '/fc8'] = net return net, end_points
[ "def", "vgg_16", "(", "inputs", ",", "num_classes", "=", "1000", ",", "is_training", "=", "True", ",", "dropout_keep_prob", "=", "0.5", ",", "spatial_squeeze", "=", "True", ",", "scope", "=", "'vgg_16'", ",", "fc_conv_padding", "=", "'VALID'", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'vgg_16'", ",", "[", "inputs", "]", ")", "as", "sc", ":", "end_points_collection", "=", "sc", ".", "name", "+", "'_end_points'", "# Collect outputs for conv2d, fully_connected and max_pool2d.", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "conv2d", ",", "slim", ".", "fully_connected", ",", "slim", ".", "max_pool2d", "]", ",", "outputs_collections", "=", "end_points_collection", ")", ":", "net", "=", "slim", ".", "repeat", "(", "inputs", ",", "2", ",", "slim", ".", "conv2d", ",", "64", ",", "[", "3", ",", "3", "]", ",", "scope", "=", "'conv1'", ")", "net", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "2", ",", "2", "]", ",", "scope", "=", "'pool1'", ")", "net", "=", "slim", ".", "repeat", "(", "net", ",", "2", ",", "slim", ".", "conv2d", ",", "128", ",", "[", "3", ",", "3", "]", ",", "scope", "=", "'conv2'", ")", "net", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "2", ",", "2", "]", ",", "scope", "=", "'pool2'", ")", "net", "=", "slim", ".", "repeat", "(", "net", ",", "3", ",", "slim", ".", "conv2d", ",", "256", ",", "[", "3", ",", "3", "]", ",", "scope", "=", "'conv3'", ")", "net", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "2", ",", "2", "]", ",", "scope", "=", "'pool3'", ")", "net", "=", "slim", ".", "repeat", "(", "net", ",", "3", ",", "slim", ".", "conv2d", ",", "512", ",", "[", "3", ",", "3", "]", ",", "scope", "=", "'conv4'", ")", "net", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "2", ",", "2", "]", ",", "scope", "=", "'pool4'", ")", "net", "=", "slim", ".", "repeat", "(", "net", ",", "3", ",", "slim", ".", "conv2d", ",", "512", ",", "[", "3", ",", "3", "]", ",", "scope", "=", "'conv5'", ")", "net", "=", "slim", ".", "max_pool2d", "(", "net", ",", "[", "2", ",", "2", "]", ",", "scope", "=", "'pool5'", ")", "# Use conv2d instead of fully_connected layers.", "net", "=", "slim", ".", "conv2d", "(", "net", ",", "4096", ",", "[", "7", ",", "7", "]", ",", "padding", "=", "fc_conv_padding", ",", "scope", "=", "'fc6'", ")", "net", "=", "slim", ".", "dropout", "(", "net", ",", "dropout_keep_prob", ",", "is_training", "=", "is_training", ",", "scope", "=", "'dropout6'", ")", "net", "=", "slim", ".", "conv2d", "(", "net", ",", "4096", ",", "[", "1", ",", "1", "]", ",", "scope", "=", "'fc7'", ")", "net", "=", "slim", ".", "dropout", "(", "net", ",", "dropout_keep_prob", ",", "is_training", "=", "is_training", ",", "scope", "=", "'dropout7'", ")", "net", "=", "slim", ".", "conv2d", "(", "net", ",", "num_classes", ",", "[", "1", ",", "1", "]", ",", "activation_fn", "=", "None", ",", "normalizer_fn", "=", "None", ",", "scope", "=", "'fc8'", ")", "# Convert end_points_collection into a end_point dict.", "end_points", "=", "slim", ".", "utils", ".", "convert_collection_to_dict", "(", "end_points_collection", ")", "if", "spatial_squeeze", ":", "net", "=", "tf", ".", "squeeze", "(", "net", ",", "[", "1", ",", "2", "]", ",", "name", "=", "'fc8/squeezed'", ")", "end_points", "[", "sc", ".", "name", "+", "'/fc8'", "]", "=", "net", "return", "net", ",", "end_points" ]
https://github.com/wenwei202/terngrad/blob/ec4f75e9a3a1e1c4b2e6494d830fbdfdd2e03ddc/slim/nets/vgg.py#L131-L192
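A hedged TF1-style usage sketch (slim-era graph mode; assumes the vgg_16 definition above is importable, with a 224x224 input yielding [batch, 1000] logits after the squeeze):

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
logits, end_points = vgg_16(images, num_classes=1000, is_training=False)
print(logits.shape)  # (?, 1000) once the spatial dimensions are squeezed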
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/vod/v20180717/vod_client.py
python
VodClient.SplitMedia
(self, request)
Splits a VOD video into multiple clips, generating several new VOD videos. :param request: Request instance for SplitMedia. :type request: :class:`tencentcloud.vod.v20180717.models.SplitMediaRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.SplitMediaResponse`
Splits a VOD video into multiple clips, generating several new VOD videos.
[ "Splits", "a", "VOD", "video", "into", "multiple", "clips", "generating", "several", "new", "VOD", "videos", "." ]
def SplitMedia(self, request): """Splits a VOD video into multiple clips, generating several new VOD videos. :param request: Request instance for SplitMedia. :type request: :class:`tencentcloud.vod.v20180717.models.SplitMediaRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.SplitMediaResponse` """ try: params = request._serialize() body = self.call("SplitMedia", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.SplitMediaResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
[ "def", "SplitMedia", "(", "self", ",", "request", ")", ":", "try", ":", "params", "=", "request", ".", "_serialize", "(", ")", "body", "=", "self", ".", "call", "(", "\"SplitMedia\"", ",", "params", ")", "response", "=", "json", ".", "loads", "(", "body", ")", "if", "\"Error\"", "not", "in", "response", "[", "\"Response\"", "]", ":", "model", "=", "models", ".", "SplitMediaResponse", "(", ")", "model", ".", "_deserialize", "(", "response", "[", "\"Response\"", "]", ")", "return", "model", "else", ":", "code", "=", "response", "[", "\"Response\"", "]", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "message", "=", "response", "[", "\"Response\"", "]", "[", "\"Error\"", "]", "[", "\"Message\"", "]", "reqid", "=", "response", "[", "\"Response\"", "]", "[", "\"RequestId\"", "]", "raise", "TencentCloudSDKException", "(", "code", ",", "message", ",", "reqid", ")", "except", "Exception", "as", "e", ":", "if", "isinstance", "(", "e", ",", "TencentCloudSDKException", ")", ":", "raise", "else", ":", "raise", "TencentCloudSDKException", "(", "e", ".", "message", ",", "e", ".", "message", ")" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/vod/v20180717/vod_client.py#L3449-L3474
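A hedged call sketch following the SDK's usual credential/client pattern; the FileId field is an assumption taken from the VOD API at large, not from the code above:

from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")
client = vod_client.VodClient(cred, "ap-guangzhou")

req = models.SplitMediaRequest()
req.FileId = "your-media-file-id"  # hypothetical required field
resp = client.SplitMedia(req)
print(resp.to_json_string())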
facebookresearch/ReAgent
52f666670a7fa03206812ef48949f6b934d400f7
reagent/gym/policies/predictor_policies.py
python
DiscreteDQNPredictorPolicy.act
( self, obs: Union[rlt.ServingFeatureData, Tuple[torch.Tensor, torch.Tensor]], possible_actions_mask: Optional[torch.Tensor], )
return self.sampler.sample_action(scores).cpu().detach()
Input is either state_with_presence, or ServingFeatureData (in the case of sparse features)
Input is either state_with_presence, or ServingFeatureData (in the case of sparse features)
[ "Input", "is", "either", "state_with_presence", "or", "ServingFeatureData", "(", "in", "the", "case", "of", "sparse", "features", ")" ]
def act( self, obs: Union[rlt.ServingFeatureData, Tuple[torch.Tensor, torch.Tensor]], possible_actions_mask: Optional[torch.Tensor], ) -> rlt.ActorOutput: """Input is either state_with_presence, or ServingFeatureData (in the case of sparse features)""" assert isinstance(obs, tuple) if isinstance(obs, rlt.ServingFeatureData): state: rlt.ServingFeatureData = obs else: state = rlt.ServingFeatureData( float_features_with_presence=obs, id_list_features={}, id_score_list_features={}, ) scores = self.scorer(state, possible_actions_mask) return self.sampler.sample_action(scores).cpu().detach()
[ "def", "act", "(", "self", ",", "obs", ":", "Union", "[", "rlt", ".", "ServingFeatureData", ",", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", "]", ",", "possible_actions_mask", ":", "Optional", "[", "torch", ".", "Tensor", "]", ",", ")", "->", "rlt", ".", "ActorOutput", ":", "assert", "isinstance", "(", "obs", ",", "tuple", ")", "if", "isinstance", "(", "obs", ",", "rlt", ".", "ServingFeatureData", ")", ":", "state", ":", "rlt", ".", "ServingFeatureData", "=", "obs", "else", ":", "state", "=", "rlt", ".", "ServingFeatureData", "(", "float_features_with_presence", "=", "obs", ",", "id_list_features", "=", "{", "}", ",", "id_score_list_features", "=", "{", "}", ",", ")", "scores", "=", "self", ".", "scorer", "(", "state", ",", "possible_actions_mask", ")", "return", "self", ".", "sampler", ".", "sample_action", "(", "scores", ")", ".", "cpu", "(", ")", ".", "detach", "(", ")" ]
https://github.com/facebookresearch/ReAgent/blob/52f666670a7fa03206812ef48949f6b934d400f7/reagent/gym/policies/predictor_policies.py#L87-L104
brython-dev/brython
9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3
www/src/Lib/shutil.py
python
get_unpack_formats
()
return formats
Returns a list of supported formats for unpacking. Each element of the returned sequence is a tuple (name, extensions, description)
Returns a list of supported formats for unpacking.
[ "Returns", "a", "list", "of", "supported", "formats", "for", "unpacking", "." ]
def get_unpack_formats(): """Returns a list of supported formats for unpacking. Each element of the returned sequence is a tuple (name, extensions, description) """ formats = [(name, info[0], info[3]) for name, info in _UNPACK_FORMATS.items()] formats.sort() return formats
[ "def", "get_unpack_formats", "(", ")", ":", "formats", "=", "[", "(", "name", ",", "info", "[", "0", "]", ",", "info", "[", "3", "]", ")", "for", "name", ",", "info", "in", "_UNPACK_FORMATS", ".", "items", "(", ")", "]", "formats", ".", "sort", "(", ")", "return", "formats" ]
https://github.com/brython-dev/brython/blob/9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3/www/src/Lib/shutil.py#L1094-L1103
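Because this lives in the standard library, the registry is directly inspectable:

import shutil

for name, extensions, description in shutil.get_unpack_formats():
    print(name, extensions)
# e.g. gztar ['.tar.gz', '.tgz'], tar ['.tar'], zip ['.zip']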
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
hdfs_namenode/datadog_checks/hdfs_namenode/config_models/defaults.py
python
instance_service
(field, value)
return get_default_field_value(field, value)
[]
def instance_service(field, value): return get_default_field_value(field, value)
[ "def", "instance_service", "(", "field", ",", "value", ")", ":", "return", "get_default_field_value", "(", "field", ",", "value", ")" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/hdfs_namenode/datadog_checks/hdfs_namenode/config_models/defaults.py#L133-L134
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/pandas/core/categorical.py
python
Categorical.add_categories
(self, new_categories, inplace=False)
Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Raises ------ ValueError If the new categories include old categories or do not validate as categories Parameters ---------- new_categories : category or list-like of category The new categories to be included. inplace : boolean (default: False) Whether or not to add the categories inplace or return a copy of this categorical with added categories. Returns ------- cat : Categorical with new categories added or None if inplace. See also -------- rename_categories reorder_categories remove_categories remove_unused_categories set_categories
Add new categories.
[ "Add", "new", "categories", "." ]
def add_categories(self, new_categories, inplace=False): """ Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Raises ------ ValueError If the new categories include old categories or do not validate as categories Parameters ---------- new_categories : category or list-like of category The new categories to be included. inplace : boolean (default: False) Whether or not to add the categories inplace or return a copy of this categorical with added categories. Returns ------- cat : Categorical with new categories added or None if inplace. See also -------- rename_categories reorder_categories remove_categories remove_unused_categories set_categories """ if not is_list_like(new_categories): new_categories = [new_categories] already_included = set(new_categories) & set(self._categories) if len(already_included) != 0: msg = ("new categories must not include old categories: %s" % str(already_included)) raise ValueError(msg) new_categories = list(self._categories) + list(new_categories) cat = self if inplace else self.copy() cat._categories = self._validate_categories(new_categories) cat._codes = _coerce_indexer_dtype(cat._codes, new_categories) if not inplace: return cat
[ "def", "add_categories", "(", "self", ",", "new_categories", ",", "inplace", "=", "False", ")", ":", "if", "not", "is_list_like", "(", "new_categories", ")", ":", "new_categories", "=", "[", "new_categories", "]", "already_included", "=", "set", "(", "new_categories", ")", "&", "set", "(", "self", ".", "_categories", ")", "if", "len", "(", "already_included", ")", "!=", "0", ":", "msg", "=", "(", "\"new categories must not include old categories: %s\"", "%", "str", "(", "already_included", ")", ")", "raise", "ValueError", "(", "msg", ")", "new_categories", "=", "list", "(", "self", ".", "_categories", ")", "+", "list", "(", "new_categories", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "cat", ".", "_categories", "=", "self", ".", "_validate_categories", "(", "new_categories", ")", "cat", ".", "_codes", "=", "_coerce_indexer_dtype", "(", "cat", ".", "_codes", ",", "new_categories", ")", "if", "not", "inplace", ":", "return", "cat" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/core/categorical.py#L803-L847
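A short sketch against the public pandas API:

import pandas as pd

cat = pd.Categorical(["a", "b", "a"], categories=["a", "b"])
cat = cat.add_categories(["c"])
print(cat.categories)  # Index(['a', 'b', 'c'], dtype='object')
# cat.add_categories(["a"])  # ValueError: new categories must not include old categories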
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/variable_registry_update_request_entity.py
python
VariableRegistryUpdateRequestEntity.request
(self)
return self._request
Gets the request of this VariableRegistryUpdateRequestEntity. The Variable Registry Update Request :return: The request of this VariableRegistryUpdateRequestEntity. :rtype: VariableRegistryUpdateRequestDTO
Gets the request of this VariableRegistryUpdateRequestEntity. The Variable Registry Update Request
[ "Gets", "the", "request", "of", "this", "VariableRegistryUpdateRequestEntity", ".", "The", "Variable", "Registry", "Update", "Request" ]
def request(self): """ Gets the request of this VariableRegistryUpdateRequestEntity. The Variable Registry Update Request :return: The request of this VariableRegistryUpdateRequestEntity. :rtype: VariableRegistryUpdateRequestDTO """ return self._request
[ "def", "request", "(", "self", ")", ":", "return", "self", ".", "_request" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/variable_registry_update_request_entity.py#L57-L65
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py
python
PackageIndex.check_credentials
(self)
Check that ``username`` and ``password`` have been set, and raise an exception if not.
Check that ``username`` and ``password`` have been set, and raise an exception if not.
[ "Check", "that", "username", "and", "password", "have", "been", "set", "and", "raise", "an", "exception", "if", "not", "." ]
def check_credentials(self): """ Check that ``username`` and ``password`` have been set, and raise an exception if not. """ if self.username is None or self.password is None: raise DistlibException('username and password must be set') pm = HTTPPasswordMgr() _, netloc, _, _, _, _ = urlparse(self.url) pm.add_password(self.realm, netloc, self.username, self.password) self.password_handler = HTTPBasicAuthHandler(pm)
[ "def", "check_credentials", "(", "self", ")", ":", "if", "self", ".", "username", "is", "None", "or", "self", ".", "password", "is", "None", ":", "raise", "DistlibException", "(", "'username and password must be set'", ")", "pm", "=", "HTTPPasswordMgr", "(", ")", "_", ",", "netloc", ",", "_", ",", "_", ",", "_", ",", "_", "=", "urlparse", "(", "self", ".", "url", ")", "pm", ".", "add_password", "(", "self", ".", "realm", ",", "netloc", ",", "self", ".", "username", ",", "self", ".", "password", ")", "self", ".", "password_handler", "=", "HTTPBasicAuthHandler", "(", "pm", ")" ]
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py#L101-L111
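A hedged sketch with dummy credentials (the vendored import path matches the file above; check_credentials only wires up the basic-auth handler, it makes no network request):

from pip._vendor.distlib.index import PackageIndex

index = PackageIndex("https://pypi.org/pypi")
index.username = "alice"
index.password = "s3cret"
index.check_credentials()  # raises DistlibException if either field is None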
iGio90/Dwarf
bb3011cdffd209c7e3f5febe558053bf649ca69c
dwarf_debugger/ui/widgets/hex_edit.py
python
HexEditor._clear_error
(self)
resets error
resets error
[ "resets", "error" ]
def _clear_error(self): """ resets error """ self._error_timer.stop() self._error_message = '' self._force_repaint(True)
[ "def", "_clear_error", "(", "self", ")", ":", "self", ".", "_error_timer", ".", "stop", "(", ")", "self", ".", "_error_message", "=", "''", "self", ".", "_force_repaint", "(", "True", ")" ]
https://github.com/iGio90/Dwarf/blob/bb3011cdffd209c7e3f5febe558053bf649ca69c/dwarf_debugger/ui/widgets/hex_edit.py#L1097-L1102
coleifer/walrus
2583ac8ba81d3f6aa43fccbe28c0c13b99a1fa9d
walrus/containers.py
python
Stream.__getitem__
(self, item)
return self.get(item)
Read a range of values from a stream. The index must be a message id or a slice. An empty slice will result in reading all values from the stream. Message ids provided as lower or upper bounds are inclusive. To specify a maximum number of messages, use the "step" parameter of the slice.
Read a range of values from a stream.
[ "Read", "a", "range", "of", "values", "from", "a", "stream", "." ]
def __getitem__(self, item): """ Read a range of values from a stream. The index must be a message id or a slice. An empty slice will result in reading all values from the stream. Message ids provided as lower or upper bounds are inclusive. To specify a maximum number of messages, use the "step" parameter of the slice. """ if isinstance(item, slice): return self.range(item.start or '-', item.stop or '+', item.step) return self.get(item)
[ "def", "__getitem__", "(", "self", ",", "item", ")", ":", "if", "isinstance", "(", "item", ",", "slice", ")", ":", "return", "self", ".", "range", "(", "item", ".", "start", "or", "'-'", ",", "item", ".", "stop", "or", "'+'", ",", "item", ".", "step", ")", "return", "self", ".", "get", "(", "item", ")" ]
https://github.com/coleifer/walrus/blob/2583ac8ba81d3f6aa43fccbe28c0c13b99a1fa9d/walrus/containers.py#L1075-L1088
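A hedged usage sketch (needs a running Redis server for walrus to talk to):

from walrus import Database

stream = Database().Stream('events')
msgid = stream.add({'action': 'login'})

print(stream[msgid])  # single-message lookup, routed to get()
print(stream[:])      # everything: range('-', '+')
print(stream[::10])   # at most 10 messages via the slice step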
numba/numba
bf480b9e0da858a65508c2b17759a72ee6a44c51
numba/core/typing/templates.py
python
FunctionTemplate.__str__
(self)
return f"<{self.__class__.__name__} {srcinfo}>"
[]
def __str__(self): info = self.get_template_info() srcinfo = f"{info['filename']}:{info['lines'][0]}" return f"<{self.__class__.__name__} {srcinfo}>"
[ "def", "__str__", "(", "self", ")", ":", "info", "=", "self", ".", "get_template_info", "(", ")", "srcinfo", "=", "f\"{info['filename']}:{info['lines'][0]}\"", "return", "f\"<{self.__class__.__name__} {srcinfo}>\"" ]
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/typing/templates.py#L334-L337
fancompute/neuroptica
7bc3c152f2713780b88e701744b0541175b12111
neuroptica/losses.py
python
Loss.L
(X: np.ndarray, T: np.ndarray)
The scalar, real-valued loss function (vectorized over multiple X, T inputs) :param X: the output of the network :param T: the target output :return: loss function for each X
The scalar, real-valued loss function (vectorized over multiple X, T inputs) :param X: the output of the network :param T: the target output :return: loss function for each X
[ "The", "scalar", "real", "-", "valued", "loss", "function", "(", "vectorized", "over", "multiple", "X", "T", "inputs", ")", ":", "param", "X", ":", "the", "output", "of", "the", "network", ":", "param", "T", ":", "the", "target", "output", ":", "return", ":", "loss", "function", "for", "each", "X" ]
def L(X: np.ndarray, T: np.ndarray) -> np.ndarray: ''' The scalar, real-valued loss function (vectorized over multiple X, T inputs) :param X: the output of the network :param T: the target output :return: loss function for each X ''' raise NotImplementedError("Loss function must be specified in child class")
[ "def", "L", "(", "X", ":", "np", ".", "ndarray", ",", "T", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "raise", "NotImplementedError", "(", "\"Loss function must be specified in child class\"", ")" ]
https://github.com/fancompute/neuroptica/blob/7bc3c152f2713780b88e701744b0541175b12111/neuroptica/losses.py#L9-L16
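A hedged sketch of a concrete subclass; the class name and the squared-error choice are illustrative, not taken from neuroptica:

import numpy as np

class MeanSquaredError(Loss):  # hypothetical subclass
    @staticmethod
    def L(X: np.ndarray, T: np.ndarray) -> np.ndarray:
        return np.sum(np.abs(X - T) ** 2, axis=0)

X = np.array([[1.0 + 0j], [0.0 + 0j]])
T = np.zeros((2, 1), dtype=complex)
print(MeanSquaredError.L(X, T))  # [1.]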
biolab/orange2
db40a9449cb45b507d63dcd5739b223f9cffb8e6
Orange/OrangeCanvas/document/schemeedit.py
python
SchemeEditWidget.path
(self)
return self.__path
Return the path associated with the scheme
Return the path associated with the scheme
[ "Return", "the", "path", "associated", "with", "the", "scheme" ]
def path(self): """ Return the path associated with the scheme """ return self.__path
[ "def", "path", "(", "self", ")", ":", "return", "self", ".", "__path" ]
https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/OrangeCanvas/document/schemeedit.py#L582-L586
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/codeintel/play/core.py
python
QueryNewPaletteEvent.__init__
(self, *args, **kwargs)
__init__(int winid=0) -> QueryNewPaletteEvent
__init__(int winid=0) -> QueryNewPaletteEvent
[ "__init__", "(", "int", "winid", "=", "0", ")", "-", ">", "QueryNewPaletteEvent" ]
def __init__(self, *args, **kwargs): """__init__(int winid=0) -> QueryNewPaletteEvent""" newobj = _core.new_QueryNewPaletteEvent(*args, **kwargs) self.this = newobj.this self.thisown = 1 del newobj.thisown
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "newobj", "=", "_core", ".", "new_QueryNewPaletteEvent", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "this", "=", "newobj", ".", "this", "self", ".", "thisown", "=", "1", "del", "newobj", ".", "thisown" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/codeintel/play/core.py#L4188-L4193
yongzhuo/Keras-TextClassification
640e3f44f90d9d8046546f7e1a93a29ebe5c8d30
keras_textclassification/data_preprocess/generator_preprocess.py
python
PreprocessGenerator.prereocess_pred_xid
(self, pred)
[]
def prereocess_pred_xid(self, pred): if os.path.exists(self.path_fast_text_model_l2i_i2l): pred_l2i = {} l2i = self.l2i_i2l['l2i'] for i in range(len(pred)): pred_l2i[pred[i]] = l2i[pred[i]] pred_l2i_rank = [sorted(pred_l2i.items(), key=lambda k: k[1], reverse=True)] return pred_l2i_rank else: raise RuntimeError("path_fast_text_model_label2index is None")
[ "def", "prereocess_pred_xid", "(", "self", ",", "pred", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "path_fast_text_model_l2i_i2l", ")", ":", "pred_l2i", "=", "{", "}", "l2i", "=", "self", ".", "l2i_i2l", "[", "'l2i'", "]", "for", "i", "in", "range", "(", "len", "(", "pred", ")", ")", ":", "pred_l2i", "[", "pred", "[", "i", "]", "]", "=", "l2i", "[", "pred", "[", "i", "]", "]", "pred_l2i_rank", "=", "[", "sorted", "(", "pred_l2i", ".", "items", "(", ")", ",", "key", "=", "lambda", "k", ":", "k", "[", "1", "]", ",", "reverse", "=", "True", ")", "]", "return", "pred_l2i_rank", "else", ":", "raise", "RuntimeError", "(", "\"path_fast_text_model_label2index is None\"", ")" ]
https://github.com/yongzhuo/Keras-TextClassification/blob/640e3f44f90d9d8046546f7e1a93a29ebe5c8d30/keras_textclassification/data_preprocess/generator_preprocess.py#L40-L49
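On a toy label map, the ranking logic of prereocess_pred_xid reduces to the dictionary sort below (labels and indices are made up for illustration):
l2i = {"sports": 0, "finance": 1, "tech": 2}
pred = ["tech", "sports"]
pred_l2i = {p: l2i[p] for p in pred}
# Rank predicted labels by their index, highest first, wrapped in a one-element list
pred_l2i_rank = [sorted(pred_l2i.items(), key=lambda k: k[1], reverse=True)]
print(pred_l2i_rank)  # [[('tech', 2), ('sports', 0)]]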
priomsrb/vimswitch
8d5732bbe9f0c00a8dd3e85c6ef18e37f4df49bb
vimswitch/FileDownloader.py
python
FileDownloader._getDownloadFilename
(self, url, headers)
return filename
Returns the filename of the download by first parsing the header and if unsuccessful, then parsing the url. If both fail, then returns an empty string.
Returns the filename of the download by first parsing the header and if unsuccessful, then parsing the url. If both fail, then returns an empty string.
[ "Returns", "the", "filename", "of", "the", "download", "by", "first", "parsing", "the", "header", "and", "if", "unsuccessful", "then", "parsing", "the", "url", ".", "If", "both", "fail", "then", "returns", "an", "empty", "string", "." ]
def _getDownloadFilename(self, url, headers): """ Returns the filename of the download by first parsing the header and if unsuccessful, then parsing the url. If both fail, then returns an empty string. """ if 'content-disposition' in headers: regex = 'attachment; filename=(.*)' contentDisposition = headers['content-disposition'] match = re.match(regex, contentDisposition) if match is not None: filename = match.group(1) return filename urlPath = urlparse(url).path filename = os.path.basename(urlPath) return filename
[ "def", "_getDownloadFilename", "(", "self", ",", "url", ",", "headers", ")", ":", "if", "'content-disposition'", "in", "headers", ":", "regex", "=", "'attachment; filename=(.*)'", "contentDisposition", "=", "headers", "[", "'content-disposition'", "]", "match", "=", "re", ".", "match", "(", "regex", ",", "contentDisposition", ")", "if", "match", "is", "not", "None", ":", "filename", "=", "match", ".", "group", "(", "1", ")", "return", "filename", "urlPath", "=", "urlparse", "(", "url", ")", ".", "path", "filename", "=", "os", ".", "path", ".", "basename", "(", "urlPath", ")", "return", "filename" ]
https://github.com/priomsrb/vimswitch/blob/8d5732bbe9f0c00a8dd3e85c6ef18e37f4df49bb/vimswitch/FileDownloader.py#L52-L68
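The header-then-URL strategy can be exercised standalone; this sketch swaps in Python 3's urllib.parse but otherwise mirrors the record above:
import os
import re
from urllib.parse import urlparse  # Python 3 home of the urlparse used above

def get_download_filename(url, headers):
    # Prefer the Content-Disposition header, fall back to the URL path.
    match = re.match(r'attachment; filename=(.*)',
                     headers.get('content-disposition', ''))
    if match is not None:
        return match.group(1)
    return os.path.basename(urlparse(url).path)

print(get_download_filename('http://example.com/files/a.zip', {}))  # a.zip
print(get_download_filename('http://example.com/dl',
                            {'content-disposition': 'attachment; filename=b.tar'}))  # b.tar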
Ledger-Donjon/lascar
7a1fc2187a9b642efcdda5d9177f86ec2345d7ba
lascar/container/container.py
python
Container.value_section
(self)
return self._value_section if hasattr(self, "_value_section") else None
Value area to be read from the original value. :type: list, range, slice
Value area to be read from the original value.
[ "Value", "area", "to", "be", "read", "from", "the", "original", "value", "." ]
def value_section(self): """ Value area to be read from the original value. :type: list, range, slice """ return self._value_section if hasattr(self, "_value_section") else None
[ "def", "value_section", "(", "self", ")", ":", "return", "self", ".", "_value_section", "if", "hasattr", "(", "self", ",", "\"_value_section\"", ")", "else", "None" ]
https://github.com/Ledger-Donjon/lascar/blob/7a1fc2187a9b642efcdda5d9177f86ec2345d7ba/lascar/container/container.py#L253-L259
eliben/pyelftools
8f7a0becaface09435c4374947548b7851e3d1a2
elftools/construct/adapters.py
python
MappingAdapter._encode
(self, obj, context)
[]
def _encode(self, obj, context): try: return self.encoding[obj] except (KeyError, TypeError): if self.encdefault is NotImplemented: raise MappingError("no encoding mapping for %r [%s]" % ( obj, self.subcon.name)) if self.encdefault is Pass: return obj return self.encdefault
[ "def", "_encode", "(", "self", ",", "obj", ",", "context", ")", ":", "try", ":", "return", "self", ".", "encoding", "[", "obj", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "if", "self", ".", "encdefault", "is", "NotImplemented", ":", "raise", "MappingError", "(", "\"no encoding mapping for %r [%s]\"", "%", "(", "obj", ",", "self", ".", "subcon", ".", "name", ")", ")", "if", "self", ".", "encdefault", "is", "Pass", ":", "return", "obj", "return", "self", ".", "encdefault" ]
https://github.com/eliben/pyelftools/blob/8f7a0becaface09435c4374947548b7851e3d1a2/elftools/construct/adapters.py#L84-L93
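Stripped of construct's MappingError and Pass sentinel, the lookup-with-default behaviour is just the following (encode and PASS are stand-in names, not the construct API):
PASS = object()  # stand-in for construct's Pass singleton

def encode(obj, encoding, encdefault=NotImplemented):
    try:
        return encoding[obj]
    except (KeyError, TypeError):
        if encdefault is NotImplemented:
            raise ValueError("no encoding mapping for %r" % (obj,))
        if encdefault is PASS:
            return obj  # pass the object through unchanged
        return encdefault

print(encode("a", {"a": 1}))                   # 1
print(encode("b", {"a": 1}, encdefault=0))     # 0
print(encode("b", {"a": 1}, encdefault=PASS))  # 'b'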
cltk/cltk
1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1
src/cltk/stem/lat.py
python
_matchremove_verb_endings
(word)
return word
Remove the verb endings
Remove the verb endings
[ "Remove", "the", "verb", "endings" ]
def _matchremove_verb_endings(word): """Remove the verb endings""" i_verb_endings = ["iuntur", "erunt", "untur", "iunt", "unt"] bi_verb_endings = ["beris", "bor", "bo"] eri_verb_endings = ["ero"] verb_endings = [ "mini", "ntur", "stis", "mur", "mus", "ris", "sti", "tis", "tur", "ns", "nt", "ri", "m", "r", "s", "t", ] # replace i verb endings with i for ending in i_verb_endings: if word.endswith(ending): word = re.sub(r"{0}$".format(ending), "i", word) return word # replace bi verb endings with bi for ending in bi_verb_endings: if word.endswith(ending): word = re.sub(r"{0}$".format(ending), "bi", word) return word # replace eri verb endings with eri for ending in eri_verb_endings: if word.endswith(ending): word = re.sub(r"{0}$".format(ending), "eri", word) return word # otherwise, remove general verb endings for ending in verb_endings: if word.endswith(ending): word = re.sub(r"{0}$".format(ending), "", word) break return word
[ "def", "_matchremove_verb_endings", "(", "word", ")", ":", "i_verb_endings", "=", "[", "\"iuntur\"", ",", "\"erunt\"", ",", "\"untur\"", ",", "\"iunt\"", ",", "\"unt\"", "]", "bi_verb_endings", "=", "[", "\"beris\"", ",", "\"bor\"", ",", "\"bo\"", "]", "eri_verb_endings", "=", "[", "\"ero\"", "]", "verb_endings", "=", "[", "\"mini\"", ",", "\"ntur\"", ",", "\"stis\"", ",", "\"mur\"", ",", "\"mus\"", ",", "\"ris\"", ",", "\"sti\"", ",", "\"tis\"", ",", "\"tur\"", ",", "\"ns\"", ",", "\"nt\"", ",", "\"ri\"", ",", "\"m\"", ",", "\"r\"", ",", "\"s\"", ",", "\"t\"", ",", "]", "# replace i verb endings with i", "for", "ending", "in", "i_verb_endings", ":", "if", "word", ".", "endswith", "(", "ending", ")", ":", "word", "=", "re", ".", "sub", "(", "r\"{0}$\"", ".", "format", "(", "ending", ")", ",", "\"i\"", ",", "word", ")", "return", "word", "# replace bi verb endings with bi", "for", "ending", "in", "bi_verb_endings", ":", "if", "word", ".", "endswith", "(", "ending", ")", ":", "word", "=", "re", ".", "sub", "(", "r\"{0}$\"", ".", "format", "(", "ending", ")", ",", "\"bi\"", ",", "word", ")", "return", "word", "# replace eri verb endings with eri", "for", "ending", "in", "eri_verb_endings", ":", "if", "word", ".", "endswith", "(", "ending", ")", ":", "word", "=", "re", ".", "sub", "(", "r\"{0}$\"", ".", "format", "(", "ending", ")", ",", "\"eri\"", ",", "word", ")", "return", "word", "# otherwise, remove general verb endings", "for", "ending", "in", "verb_endings", ":", "if", "word", ".", "endswith", "(", "ending", ")", ":", "word", "=", "re", ".", "sub", "(", "r\"{0}$\"", ".", "format", "(", "ending", ")", ",", "\"\"", ",", "word", ")", "break", "return", "word" ]
https://github.com/cltk/cltk/blob/1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1/src/cltk/stem/lat.py#L126-L178
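Assuming _matchremove_verb_endings above is in scope (it relies on the re module), a few calls show the replace-then-strip ordering; the example words are ordinary Latin verb forms:
import re  # required by the record's function

print(_matchremove_verb_endings("laudabo"))  # 'laudabi' ('bo' -> 'bi')
print(_matchremove_verb_endings("amant"))    # 'ama'     ('nt' stripped)
print(_matchremove_verb_endings("amamini"))  # 'ama'     ('mini' stripped)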
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/dates.py
python
num2epoch
(d)
return (np.asarray(d) - 719163) * spd
Convert days since 0001 to epoch. *d* can be a number or sequence.
Convert days since 0001 to epoch. *d* can be a number or sequence.
[ "Convert", "days", "since", "0001", "to", "epoch", ".", "*", "d", "*", "can", "be", "a", "number", "or", "sequence", "." ]
def num2epoch(d): """ Convert days since 0001 to epoch. *d* can be a number or sequence. """ spd = 24. * 3600. return (np.asarray(d) - 719163) * spd
[ "def", "num2epoch", "(", "d", ")", ":", "spd", "=", "24.", "*", "3600.", "return", "(", "np", ".", "asarray", "(", "d", ")", "-", "719163", ")", "*", "spd" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/dates.py#L1166-L1171
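Since 719163 is the proleptic-Gregorian ordinal of 1970-01-01, the conversion can be spot-checked directly (num2epoch restated from the record above):
import datetime
import numpy as np

def num2epoch(d):
    spd = 24. * 3600.
    return (np.asarray(d) - 719163) * spd

assert datetime.date(1970, 1, 1).toordinal() == 719163
print(num2epoch(719163))  # 0.0     -> the Unix epoch
print(num2epoch(719164))  # 86400.0 -> one day after the epoch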
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/debug/debugger.py
python
Debugger._setup_pending_breakpoints_load_dll
(self, dll_name)
[]
def _setup_pending_breakpoints_load_dll(self, dll_name): for bp in self._pending_breakpoints_new[None]: if isinstance(bp.addr, basestring): target_dll = bp.addr.lower().split("!")[0] # Cannot work AS-IS yet. Implement it ? # if target_dll == "*" or target_dll == dll_name: if target_dll == dll_name: _setup_method = getattr(self, "_setup_breakpoint_" + bp.type) if bp.apply_to_target(self.current_process): _setup_method(bp, self.current_process) else: for t in [t for t in self.current_process.threads if t.tid in self.threads]: _setup_method(bp, t) for bp in self._pending_breakpoints_new[self.current_process.pid]: if isinstance(bp.addr, basestring): target_dll = bp.addr.split("!")[0] if target_dll == dll_name: _setup_method = getattr(self, "_setup_breakpoint_" + bp.type) _setup_method(bp, self.current_process) for thread in self.current_process.threads: for bp in self._pending_breakpoints_new[thread.tid]: if isinstance(bp.addr, basestring): target_dll = bp.addr.split("!")[0] if target_dll == dll_name: _setup_method = getattr(self, "_setup_breakpoint_" + bp.type) _setup_method(bp, thread)
[ "def", "_setup_pending_breakpoints_load_dll", "(", "self", ",", "dll_name", ")", ":", "for", "bp", "in", "self", ".", "_pending_breakpoints_new", "[", "None", "]", ":", "if", "isinstance", "(", "bp", ".", "addr", ",", "basestring", ")", ":", "target_dll", "=", "bp", ".", "addr", ".", "lower", "(", ")", ".", "split", "(", "\"!\"", ")", "[", "0", "]", "# Cannot work AS-IS yet. Implement it ?", "# if target_dll == \"*\" or target_dll == dll_name:", "if", "target_dll", "==", "dll_name", ":", "_setup_method", "=", "getattr", "(", "self", ",", "\"_setup_breakpoint_\"", "+", "bp", ".", "type", ")", "if", "bp", ".", "apply_to_target", "(", "self", ".", "current_process", ")", ":", "_setup_method", "(", "bp", ",", "self", ".", "current_process", ")", "else", ":", "for", "t", "in", "[", "t", "for", "t", "in", "self", ".", "current_process", ".", "threads", "if", "t", ".", "tid", "in", "self", ".", "threads", "]", ":", "_setup_method", "(", "bp", ",", "t", ")", "for", "bp", "in", "self", ".", "_pending_breakpoints_new", "[", "self", ".", "current_process", ".", "pid", "]", ":", "if", "isinstance", "(", "bp", ".", "addr", ",", "basestring", ")", ":", "target_dll", "=", "bp", ".", "addr", ".", "split", "(", "\"!\"", ")", "[", "0", "]", "if", "target_dll", "==", "dll_name", ":", "_setup_method", "=", "getattr", "(", "self", ",", "\"_setup_breakpoint_\"", "+", "bp", ".", "type", ")", "_setup_method", "(", "bp", ",", "self", ".", "current_process", ")", "for", "thread", "in", "self", ".", "current_process", ".", "threads", ":", "for", "bp", "in", "self", ".", "_pending_breakpoints_new", "[", "thread", ".", "tid", "]", ":", "if", "isinstance", "(", "bp", ".", "addr", ",", "basestring", ")", ":", "target_dll", "=", "bp", ".", "addr", ".", "split", "(", "\"!\"", ")", "[", "0", "]", "if", "target_dll", "==", "dll_name", ":", "_setup_method", "=", "getattr", "(", "self", ",", "\"_setup_breakpoint_\"", "+", "bp", ".", "type", ")", "_setup_method", "(", "bp", ",", "thread", ")" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/debug/debugger.py#L483-L510
dbt-labs/dbt-core
e943b9fc842535e958ef4fd0b8703adc91556bc6
core/dbt/logger.py
python
LogManager.format_text
(self)
[]
def format_text(self): for handler in self.objects: if isinstance(handler, FormatterMixin): handler.format_text()
[ "def", "format_text", "(", "self", ")", ":", "for", "handler", "in", "self", ".", "objects", ":", "if", "isinstance", "(", "handler", ",", "FormatterMixin", ")", ":", "handler", ".", "format_text", "(", ")" ]
https://github.com/dbt-labs/dbt-core/blob/e943b9fc842535e958ef4fd0b8703adc91556bc6/core/dbt/logger.py#L543-L546
n1nj4sec/pupy
a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39
pupy/packages/all/pupyutils/netcreds.py
python
Netcreds.parse_netntlm_resp_msg
(self, headers, resp_header, seq)
Parse the client response to the challenge
Parse the client response to the challenge
[ "Parse", "the", "client", "response", "to", "the", "challenge" ]
def parse_netntlm_resp_msg(self, headers, resp_header, seq): ''' Parse the client response to the challenge ''' try: header_val3 = headers[resp_header] except KeyError: return header_val3 = header_val3.split(' ', 1) # The header value can either start with NTLM or Negotiate if header_val3[0] == 'NTLM' or header_val3[0] == 'Negotiate': try: msg3 = base64.decodestring(header_val3[1]) except binascii.Error: return return self.parse_ntlm_resp(msg3, seq)
[ "def", "parse_netntlm_resp_msg", "(", "self", ",", "headers", ",", "resp_header", ",", "seq", ")", ":", "try", ":", "header_val3", "=", "headers", "[", "resp_header", "]", "except", "KeyError", ":", "return", "header_val3", "=", "header_val3", ".", "split", "(", "' '", ",", "1", ")", "# The header value can either start with NTLM or Negotiate", "if", "header_val3", "[", "0", "]", "==", "'NTLM'", "or", "header_val3", "[", "0", "]", "==", "'Negotiate'", ":", "try", ":", "msg3", "=", "base64", ".", "decodestring", "(", "header_val3", "[", "1", "]", ")", "except", "binascii", ".", "Error", ":", "return", "return", "self", ".", "parse_ntlm_resp", "(", "msg3", ",", "seq", ")" ]
https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/packages/all/pupyutils/netcreds.py#L889-L905
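The scheme check and base64 decode at the core of this parser can be reproduced with the modern base64 API (decodestring is the Python 2 spelling); the token below is fabricated for illustration:
import base64

# Fabricated type-3 message: real tokens start with the 'NTLMSSP\x00' signature.
header_val = 'NTLM ' + base64.b64encode(b'NTLMSSP\x00\x03\x00\x00\x00').decode()
scheme, payload = header_val.split(' ', 1)
if scheme in ('NTLM', 'Negotiate'):
    msg3 = base64.b64decode(payload)
    print(msg3[:8])  # b'NTLMSSP\x00'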
gluon/AbletonLive9_RemoteScripts
0c0db5e2e29bbed88c82bf327f54d4968d36937e
pushbase/note_editor_component.py
python
NoteEditorComponent._add_note_in_step
(self, step, modify_existing = True)
return False
Add note in given step if there are none in there, otherwise select the step for potential deletion or modification
Add note in given step if there are none in there, otherwise select the step for potential deletion or modification
[ "Add", "note", "in", "given", "step", "if", "there", "are", "none", "in", "there", "otherwise", "select", "the", "step", "for", "potential", "deletion", "or", "modification" ]
def _add_note_in_step(self, step, modify_existing = True): """ Add note in given step if there are none in there, otherwise select the step for potential deletion or modification """ if liveobj_valid(self._sequencer_clip): x, y = step time = self._get_step_start_time(x, y) notes = self._time_step(time).filter_notes(self._clip_notes) if notes: if modify_existing: most_significant_velocity = most_significant_note(notes)[3] if self._mute_button and self._mute_button.is_pressed() or most_significant_velocity != 127 and self.full_velocity: self._trigger_modification(step, immediate=True) else: pitch = self._note_index mute = self._mute_button and self._mute_button.is_pressed() velocity = 127 if self.full_velocity else DEFAULT_VELOCITY note = (pitch, time, self._get_step_length(), velocity, mute) self._sequencer_clip.set_notes((note,)) self._sequencer_clip.deselect_all_notes() self._trigger_modification(step, done=True) return True return False
[ "def", "_add_note_in_step", "(", "self", ",", "step", ",", "modify_existing", "=", "True", ")", ":", "if", "liveobj_valid", "(", "self", ".", "_sequencer_clip", ")", ":", "x", ",", "y", "=", "step", "time", "=", "self", ".", "_get_step_start_time", "(", "x", ",", "y", ")", "notes", "=", "self", ".", "_time_step", "(", "time", ")", ".", "filter_notes", "(", "self", ".", "_clip_notes", ")", "if", "notes", ":", "if", "modify_existing", ":", "most_significant_velocity", "=", "most_significant_note", "(", "notes", ")", "[", "3", "]", "if", "self", ".", "_mute_button", "and", "self", ".", "_mute_button", ".", "is_pressed", "(", ")", "or", "most_significant_velocity", "!=", "127", "and", "self", ".", "full_velocity", ":", "self", ".", "_trigger_modification", "(", "step", ",", "immediate", "=", "True", ")", "else", ":", "pitch", "=", "self", ".", "_note_index", "mute", "=", "self", ".", "_mute_button", "and", "self", ".", "_mute_button", ".", "is_pressed", "(", ")", "velocity", "=", "127", "if", "self", ".", "full_velocity", "else", "DEFAULT_VELOCITY", "note", "=", "(", "pitch", ",", "time", ",", "self", ".", "_get_step_length", "(", ")", ",", "velocity", ",", "mute", ")", "self", ".", "_sequencer_clip", ".", "set_notes", "(", "(", "note", ",", ")", ")", "self", ".", "_sequencer_clip", ".", "deselect_all_notes", "(", ")", "self", ".", "_trigger_modification", "(", "step", ",", "done", "=", "True", ")", "return", "True", "return", "False" ]
https://github.com/gluon/AbletonLive9_RemoteScripts/blob/0c0db5e2e29bbed88c82bf327f54d4968d36937e/pushbase/note_editor_component.py#L399-L426
gramps-project/gramps
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
gramps/gui/plug/report/_bookdialog.py
python
BookSelector.on_edit_clicked
(self, obj)
Run the BookListDisplay dialog to present the choice of books to delete.
Run the BookListDisplay dialog to present the choice of books to delete.
[ "Run", "the", "BookListDisplay", "dialog", "to", "present", "the", "choice", "of", "books", "to", "delete", "." ]
def on_edit_clicked(self, obj): """ Run the BookListDisplay dialog to present the choice of books to delete. """ booklistdisplay = BookListDisplay(self.book_list, nodelete=False, dosave=True, parent=self.window) booklistdisplay.top.destroy() book = booklistdisplay.selection if book: self.open_book(book) self.name_entry.set_text(book.get_name()) self.book.set_name(book.get_name())
[ "def", "on_edit_clicked", "(", "self", ",", "obj", ")", ":", "booklistdisplay", "=", "BookListDisplay", "(", "self", ".", "book_list", ",", "nodelete", "=", "False", ",", "dosave", "=", "True", ",", "parent", "=", "self", ".", "window", ")", "booklistdisplay", ".", "top", ".", "destroy", "(", ")", "book", "=", "booklistdisplay", ".", "selection", "if", "book", ":", "self", ".", "open_book", "(", "book", ")", "self", ".", "name_entry", ".", "set_text", "(", "book", ".", "get_name", "(", ")", ")", "self", ".", "book", ".", "set_name", "(", "book", ".", "get_name", "(", ")", ")" ]
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/gui/plug/report/_bookdialog.py#L789-L800
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
chap19/monitor/python-monitorclient-1.1/build/lib.linux-x86_64-2.7/monitorclient/v1/shell.py
python
do_type_list
(cs, args)
Print a list of available 'monitor types'.
Print a list of available 'monitor types'.
[ "Print", "a", "list", "of", "available", "monitor", "types", "." ]
def do_type_list(cs, args): """Print a list of available 'monitor types'.""" vtypes = cs.monitor_types.list() _print_monitor_type_list(vtypes)
[ "def", "do_type_list", "(", "cs", ",", "args", ")", ":", "vtypes", "=", "cs", ".", "monitor_types", ".", "list", "(", ")", "_print_monitor_type_list", "(", "vtypes", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/chap19/monitor/python-monitorclient-1.1/build/lib.linux-x86_64-2.7/monitorclient/v1/shell.py#L437-L440
inspurer/WorkAttendanceSystem
1221e2d67bdf5bb15fe99517cc3ded58ccb066df
V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/_backport/tarfile.py
python
ExFileObject.tell
(self)
return self.position
Return the current file position.
Return the current file position.
[ "Return", "the", "current", "file", "position", "." ]
def tell(self): """Return the current file position. """ if self.closed: raise ValueError("I/O operation on closed file") return self.position
[ "def", "tell", "(", "self", ")", ":", "if", "self", ".", "closed", ":", "raise", "ValueError", "(", "\"I/O operation on closed file\"", ")", "return", "self", ".", "position" ]
https://github.com/inspurer/WorkAttendanceSystem/blob/1221e2d67bdf5bb15fe99517cc3ded58ccb066df/V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/_backport/tarfile.py#L876-L882
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/email/utils.py
python
formataddr
(pair, charset='utf-8')
return address
The inverse of parseaddr(), this takes a 2-tuple of the form (realname, email_address) and returns the string value suitable for an RFC 2822 From, To or Cc header. If the first element of pair is false, then the second element is returned unmodified. Optional charset if given is the character set that is used to encode realname in case realname is not ASCII safe. Can be an instance of str or a Charset-like object which has a header_encode method. Default is 'utf-8'.
The inverse of parseaddr(), this takes a 2-tuple of the form (realname, email_address) and returns the string value suitable for an RFC 2822 From, To or Cc header.
[ "The", "inverse", "of", "parseaddr", "()", "this", "takes", "a", "2", "-", "tuple", "of", "the", "form", "(", "realname", "email_address", ")", "and", "returns", "the", "string", "value", "suitable", "for", "an", "RFC", "2822", "From", "To", "or", "Cc", "header", "." ]
def formataddr(pair, charset='utf-8'): """The inverse of parseaddr(), this takes a 2-tuple of the form (realname, email_address) and returns the string value suitable for an RFC 2822 From, To or Cc header. If the first element of pair is false, then the second element is returned unmodified. Optional charset if given is the character set that is used to encode realname in case realname is not ASCII safe. Can be an instance of str or a Charset-like object which has a header_encode method. Default is 'utf-8'. """ name, address = pair # The address MUST (per RFC) be ascii, so raise an UnicodeError if it isn't. address.encode('ascii') if name: try: name.encode('ascii') except UnicodeEncodeError: if isinstance(charset, str): charset = Charset(charset) encoded_name = charset.header_encode(name) return "%s <%s>" % (encoded_name, address) else: quotes = '' if specialsre.search(name): quotes = '"' name = escapesre.sub(r'\\\g<0>', name) return '%s%s%s <%s>' % (quotes, name, quotes, address) return address
[ "def", "formataddr", "(", "pair", ",", "charset", "=", "'utf-8'", ")", ":", "name", ",", "address", "=", "pair", "# The address MUST (per RFC) be ascii, so raise an UnicodeError if it isn't.", "address", ".", "encode", "(", "'ascii'", ")", "if", "name", ":", "try", ":", "name", ".", "encode", "(", "'ascii'", ")", "except", "UnicodeEncodeError", ":", "if", "isinstance", "(", "charset", ",", "str", ")", ":", "charset", "=", "Charset", "(", "charset", ")", "encoded_name", "=", "charset", ".", "header_encode", "(", "name", ")", "return", "\"%s <%s>\"", "%", "(", "encoded_name", ",", "address", ")", "else", ":", "quotes", "=", "''", "if", "specialsre", ".", "search", "(", "name", ")", ":", "quotes", "=", "'\"'", "name", "=", "escapesre", ".", "sub", "(", "r'\\\\\\g<0>'", ",", "name", ")", "return", "'%s%s%s <%s>'", "%", "(", "quotes", ",", "name", ",", "quotes", ",", "address", ")", "return", "address" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/email/utils.py#L76-L106
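The three branches (plain name, name with specials, non-ASCII name) are easy to see by calling the stdlib function directly:
from email.utils import formataddr

print(formataddr(('John Doe', 'jdoe@example.com')))   # John Doe <jdoe@example.com>
print(formataddr(('Doe, John', 'jdoe@example.com')))  # "Doe, John" <jdoe@example.com>
print(formataddr(('JosĆ©', 'jose@example.com')))       # =?utf-8?...?= <jose@example.com> (RFC 2047)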
circuits/circuits
a1c404174835bc94bacb4b090280999d90f14e3b
circuits/web/headers.py
python
Headers.elements
(self, key)
return header_elements(key, self.get(key))
Return a sorted list of HeaderElements for the given header.
Return a sorted list of HeaderElements for the given header.
[ "Return", "a", "sorted", "list", "of", "HeaderElements", "for", "the", "given", "header", "." ]
def elements(self, key): """Return a sorted list of HeaderElements for the given header.""" return header_elements(key, self.get(key))
[ "def", "elements", "(", "self", ",", "key", ")", ":", "return", "header_elements", "(", "key", ",", "self", ".", "get", "(", "key", ")", ")" ]
https://github.com/circuits/circuits/blob/a1c404174835bc94bacb4b090280999d90f14e3b/circuits/web/headers.py#L224-L226
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/xml/sax/xmlreader.py
python
XMLReader.parse
(self, source)
Parse an XML document from a system identifier or an InputSource.
Parse an XML document from a system identifier or an InputSource.
[ "Parse", "an", "XML", "document", "from", "a", "system", "identifier", "or", "an", "InputSource", "." ]
def parse(self, source): "Parse an XML document from a system identifier or an InputSource." raise NotImplementedError("This method must be implemented!")
[ "def", "parse", "(", "self", ",", "source", ")", ":", "raise", "NotImplementedError", "(", "\"This method must be implemented!\"", ")" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/xml/sax/xmlreader.py#L30-L32
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
contrib/watchdog/src/watchdog/utils/echo.py
python
echo_module
(mod, write=sys.stdout.write)
Echo calls to functions and methods in a module.
Echo calls to functions and methods in a module.
[ "Echo", "calls", "to", "functions", "and", "methods", "in", "a", "module", "." ]
def echo_module(mod, write=sys.stdout.write): """ Echo calls to functions and methods in a module. """ for fname, fn in inspect.getmembers(mod, inspect.isfunction): setattr(mod, fname, echo(fn, write)) for _, klass in inspect.getmembers(mod, inspect.isclass): echo_class(klass, write)
[ "def", "echo_module", "(", "mod", ",", "write", "=", "sys", ".", "stdout", ".", "write", ")", ":", "for", "fname", ",", "fn", "in", "inspect", ".", "getmembers", "(", "mod", ",", "inspect", ".", "isfunction", ")", ":", "setattr", "(", "mod", ",", "fname", ",", "echo", "(", "fn", ",", "write", ")", ")", "for", "_", ",", "klass", "in", "inspect", ".", "getmembers", "(", "mod", ",", "inspect", ".", "isclass", ")", ":", "echo_class", "(", "klass", ",", "write", ")" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/contrib/watchdog/src/watchdog/utils/echo.py#L123-L129
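A self-contained sketch of the idea, with a simplified echo() stand-in (the real decorator and echo_class live alongside this function in watchdog's echo module):
import inspect
import sys
import types

def echo(fn, write=sys.stdout.write):
    # Simplified stand-in for watchdog's echo(): log the call, then delegate.
    def wrapped(*args, **kwargs):
        write("-> %s%r\n" % (fn.__name__, args))
        return fn(*args, **kwargs)
    return wrapped

def add(a, b):
    return a + b

mod = types.ModuleType("toy")  # a throwaway module to patch in place
mod.add = add
for fname, fn in inspect.getmembers(mod, inspect.isfunction):
    setattr(mod, fname, echo(fn))

mod.add(2, 3)  # prints: -> add(2, 3)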
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/setuptools/pkg_resources/_vendor/pyparsing.py
python
LineEnd.parseImpl
( self, instring, loc, doActions=True )
[]
def parseImpl( self, instring, loc, doActions=True ): if loc<len(instring): if instring[loc] == "\n": return loc+1, "\n" else: raise ParseException(instring, loc, self.errmsg, self) elif loc == len(instring): return loc+1, [] else: raise ParseException(instring, loc, self.errmsg, self)
[ "def", "parseImpl", "(", "self", ",", "instring", ",", "loc", ",", "doActions", "=", "True", ")", ":", "if", "loc", "<", "len", "(", "instring", ")", ":", "if", "instring", "[", "loc", "]", "==", "\"\\n\"", ":", "return", "loc", "+", "1", ",", "\"\\n\"", "else", ":", "raise", "ParseException", "(", "instring", ",", "loc", ",", "self", ".", "errmsg", ",", "self", ")", "elif", "loc", "==", "len", "(", "instring", ")", ":", "return", "loc", "+", "1", ",", "[", "]", "else", ":", "raise", "ParseException", "(", "instring", ",", "loc", ",", "self", ".", "errmsg", ",", "self", ")" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/setuptools/pkg_resources/_vendor/pyparsing.py#L3169-L3178
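The three branches of parseImpl (a literal newline, end of string, failure) surface directly when LineEnd is used through pyparsing's public API; the outputs shown are how mainline pyparsing behaves:
from pyparsing import LineEnd, Word, alphas

grammar = Word(alphas) + LineEnd()
print(grammar.parseString("hello\nworld"))  # ['hello', '\n']  (newline branch)
print(grammar.parseString("hello"))         # ['hello']        (end-of-string branch)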
PINTO0309/PINTO_model_zoo
2924acda7a7d541d8712efd7cc4fd1c61ef5bddd
043_face_landmark/lib/dataset/augmentor/visual_augmentation.py
python
blur_heatmap
(src, ksize=(3, 3))
return src
[]
def blur_heatmap(src, ksize=(3, 3)): for i in range(src.shape[2]): src[:, :, i] = cv2.GaussianBlur(src[:, :, i], ksize, 0) amin, amax = src[:, :, i].min(), src[:, :, i].max() # find the max and min values if amax>0: src[:, :, i] = (src[:, :, i] - amin) / (amax - amin) # (element - min) / (max - min) return src
[ "def", "blur_heatmap", "(", "src", ",", "ksize", "=", "(", "3", ",", "3", ")", ")", ":", "for", "i", "in", "range", "(", "src", ".", "shape", "[", "2", "]", ")", ":", "src", "[", ":", ",", ":", ",", "i", "]", "=", "cv2", ".", "GaussianBlur", "(", "src", "[", ":", ",", ":", ",", "i", "]", ",", "ksize", ",", "0", ")", "amin", ",", "amax", "=", "src", "[", ":", ",", ":", ",", "i", "]", ".", "min", "(", ")", ",", "src", "[", ":", ",", ":", ",", "i", "]", ".", "max", "(", ")", "# find the max and min values", "if", "amax", ">", "0", ":", "src", "[", ":", ",", ":", ",", "i", "]", "=", "(", "src", "[", ":", ",", ":", ",", "i", "]", "-", "amin", ")", "/", "(", "amax", "-", "amin", ")", "# (element - min) / (max - min)", "return", "src" ]
https://github.com/PINTO0309/PINTO_model_zoo/blob/2924acda7a7d541d8712efd7cc4fd1c61ef5bddd/043_face_landmark/lib/dataset/augmentor/visual_augmentation.py#L56-L62
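With blur_heatmap above in scope (and cv2 importable), a single hot pixel demonstrates the per-channel min-max rescale; an all-zero channel is left untouched because its max never exceeds 0:
import numpy as np

heat = np.zeros((5, 5, 2), dtype=np.float32)
heat[2, 2, 0] = 1.0                            # one hot pixel in channel 0
out = blur_heatmap(heat.copy())
print(out[:, :, 0].min(), out[:, :, 0].max())  # 0.0 1.0 -- rescaled to [0, 1]
print(out[:, :, 1].max())                      # 0.0     -- all-zero channel skipped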
shichao-an/leetcode-python
6c523ef4759a57433e10271b584eece16f9f05f3
lowest_common_ancestor_of_a_binary_tree/solution.py
python
Solution.lowestCommonAncestor
(self, root, p, q)
:type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode
:type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode
[ ":", "type", "root", ":", "TreeNode", ":", "type", "p", ":", "TreeNode", ":", "type", "q", ":", "TreeNode", ":", "rtype", ":", "TreeNode" ]
def lowestCommonAncestor(self, root, p, q): """ :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode """ # If both a and b exist in root, return their LCA; # if either a or b exist in root, return whichever exists; # if neither of them exist in root, return None if root is None: return None elif root is p or root is q: return root else: l = self.lowestCommonAncestor(root.left, p, q) r = self.lowestCommonAncestor(root.right, p, q) if l is not None and r is not None: return root else: if l is not None: return l if r is not None: return r
[ "def", "lowestCommonAncestor", "(", "self", ",", "root", ",", "p", ",", "q", ")", ":", "# If both a and b exist in root, return their LCA;", "# if either a or b exist in root, return whichever exists;", "# if neither of them exist in root, return None", "if", "root", "is", "None", ":", "return", "None", "elif", "root", "is", "p", "or", "root", "is", "q", ":", "return", "root", "else", ":", "l", "=", "self", ".", "lowestCommonAncestor", "(", "root", ".", "left", ",", "p", ",", "q", ")", "r", "=", "self", ".", "lowestCommonAncestor", "(", "root", ".", "right", ",", "p", ",", "q", ")", "if", "l", "is", "not", "None", "and", "r", "is", "not", "None", ":", "return", "root", "else", ":", "if", "l", "is", "not", "None", ":", "return", "l", "if", "r", "is", "not", "None", ":", "return", "r" ]
https://github.com/shichao-an/leetcode-python/blob/6c523ef4759a57433e10271b584eece16f9f05f3/lowest_common_ancestor_of_a_binary_tree/solution.py#L29-L52
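A worked example on a small tree (TreeNode is the usual LeetCode node class, restated here; Solution is the class from the record above):
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left, root.right = TreeNode(5), TreeNode(1)
root.left.left, root.left.right = TreeNode(6), TreeNode(2)

sol = Solution()
print(sol.lowestCommonAncestor(root, root.left.left, root.left.right).val)  # 5
print(sol.lowestCommonAncestor(root, root.left, root.right).val)            # 3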
freelawproject/courtlistener
ab3ae7bb6e5e836b286749113e7dbb403d470912
cl/opinion_page/views.py
python
view_recap_document
( request: HttpRequest, docket_id: Optional[int] = None, doc_num: Optional[int] = None, att_num: Optional[int] = None, slug: str = "", )
return render( request, "recap_document.html", { "rd": rd, "title": title, "favorite_form": favorite_form, "private": True, # Always True for RECAP docs. }, )
This view can either load an attachment or a regular document, depending on the URL pattern that is matched.
This view can either load an attachment or a regular document, depending on the URL pattern that is matched.
[ "This", "view", "can", "either", "load", "an", "attachment", "or", "a", "regular", "document", "depending", "on", "the", "URL", "pattern", "that", "is", "matched", "." ]
def view_recap_document( request: HttpRequest, docket_id: Optional[int] = None, doc_num: Optional[int] = None, att_num: Optional[int] = None, slug: str = "", ) -> HttpResponse: """This view can either load an attachment or a regular document, depending on the URL pattern that is matched. """ try: rd = RECAPDocument.objects.filter( docket_entry__docket__id=docket_id, document_number=doc_num, attachment_number=att_num, ).order_by("pk")[0] except IndexError: raise Http404("No RECAPDocument matches the given query.") title = make_rd_title(rd) rd = make_thumb_if_needed(request, rd) try: fave = Favorite.objects.get(recap_doc_id=rd.pk, user=request.user) except (ObjectDoesNotExist, TypeError): # Not favorited or anonymous user favorite_form = FavoriteForm( initial={ "recap_doc_id": rd.pk, "name": trunc(title, 100, ellipsis="..."), } ) else: favorite_form = FavoriteForm(instance=fave) return render( request, "recap_document.html", { "rd": rd, "title": title, "favorite_form": favorite_form, "private": True, # Always True for RECAP docs. }, )
[ "def", "view_recap_document", "(", "request", ":", "HttpRequest", ",", "docket_id", ":", "Optional", "[", "int", "]", "=", "None", ",", "doc_num", ":", "Optional", "[", "int", "]", "=", "None", ",", "att_num", ":", "Optional", "[", "int", "]", "=", "None", ",", "slug", ":", "str", "=", "\"\"", ",", ")", "->", "HttpResponse", ":", "try", ":", "rd", "=", "RECAPDocument", ".", "objects", ".", "filter", "(", "docket_entry__docket__id", "=", "docket_id", ",", "document_number", "=", "doc_num", ",", "attachment_number", "=", "att_num", ",", ")", ".", "order_by", "(", "\"pk\"", ")", "[", "0", "]", "except", "IndexError", ":", "raise", "Http404", "(", "\"No RECAPDocument matches the given query.\"", ")", "title", "=", "make_rd_title", "(", "rd", ")", "rd", "=", "make_thumb_if_needed", "(", "request", ",", "rd", ")", "try", ":", "fave", "=", "Favorite", ".", "objects", ".", "get", "(", "recap_doc_id", "=", "rd", ".", "pk", ",", "user", "=", "request", ".", "user", ")", "except", "(", "ObjectDoesNotExist", ",", "TypeError", ")", ":", "# Not favorited or anonymous user", "favorite_form", "=", "FavoriteForm", "(", "initial", "=", "{", "\"recap_doc_id\"", ":", "rd", ".", "pk", ",", "\"name\"", ":", "trunc", "(", "title", ",", "100", ",", "ellipsis", "=", "\"...\"", ")", ",", "}", ")", "else", ":", "favorite_form", "=", "FavoriteForm", "(", "instance", "=", "fave", ")", "return", "render", "(", "request", ",", "\"recap_document.html\"", ",", "{", "\"rd\"", ":", "rd", ",", "\"title\"", ":", "title", ",", "\"favorite_form\"", ":", "favorite_form", ",", "\"private\"", ":", "True", ",", "# Always True for RECAP docs.", "}", ",", ")" ]
https://github.com/freelawproject/courtlistener/blob/ab3ae7bb6e5e836b286749113e7dbb403d470912/cl/opinion_page/views.py#L408-L451