Dataset schema (column name, dtype, min/max value length):

    column             dtype           min      max
    nwo                stringlengths   5        106
    sha                stringlengths   40       40
    path               stringlengths   4        174
    language           stringclasses   1 value
    identifier         stringlengths   1        140
    parameters         stringlengths   0        87.7k
    argument_list      stringclasses   1 value
    return_statement   stringlengths   0        426k
    docstring          stringlengths   0        64.3k
    docstring_summary  stringlengths   0        26.3k
    docstring_tokens   list
    function           stringlengths   18       4.83M
    function_tokens    list
    url                stringlengths   83       304
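Each record below pairs one function's source (function, function_tokens) with its repository metadata (nwo, sha, path, url) and docstring fields. As a minimal sketch of how a dump with these columns might be iterated: the dataset identifier below is a hypothetical placeholder, only the column names come from the schema above.

# Sketch only: iterate a dataset with the columns listed above.
# "your-org/your-code-corpus" is a hypothetical placeholder name,
# not an identifier taken from this document.
from datasets import load_dataset

ds = load_dataset("your-org/your-code-corpus", split="train")
for row in ds.select(range(3)):
    print(row["nwo"], row["path"], row["identifier"])
    print(row["docstring_summary"])
    print(row["function"][:200])  # raw source; can run to millions of chars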
django-haystack/django-haystack
b6dd72e6b5c97b782f5436b7bb4e8227ba6e3b06
haystack/query.py
python
ValuesListSearchQuerySet.post_process_results
(self, results)
return to_cache
[]
def post_process_results(self, results):
    to_cache = []

    if self._flat:
        accum = to_cache.extend
    else:
        accum = to_cache.append

    for result in results:
        accum([getattr(result, i, None) for i in self._fields])

    return to_cache
[ "def", "post_process_results", "(", "self", ",", "results", ")", ":", "to_cache", "=", "[", "]", "if", "self", ".", "_flat", ":", "accum", "=", "to_cache", ".", "extend", "else", ":", "accum", "=", "to_cache", ".", "append", "for", "result", "in", "results", ":", "accum", "(", "[", "getattr", "(", "result", ",", "i", ",", "None", ")", "for", "i", "in", "self", ".", "_fields", "]", ")", "return", "to_cache" ]
https://github.com/django-haystack/django-haystack/blob/b6dd72e6b5c97b782f5436b7bb4e8227ba6e3b06/haystack/query.py#L692-L703
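A note on the _flat switch in post_process_results above: binding accum to either list.extend or list.append means flat mode splices every result's field values into one flat list, while the default keeps one sub-list per result. A self-contained sketch of that pattern, with made-up rows:

# Standalone sketch of the extend-vs-append accumulator pattern used by
# post_process_results above; the sample rows here are made up.
rows = [("a", 1), ("b", 2)]

def collect(rows, flat):
    out = []
    accum = out.extend if flat else out.append
    for row in rows:
        accum(list(row))
    return out

print(collect(rows, flat=True))   # ['a', 1, 'b', 2]
print(collect(rows, flat=False))  # [['a', 1], ['b', 2]]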
radlab/sparrow
afb8efadeb88524f1394d1abe4ea66c6fd2ac744
src/main/python/parse_per_task_logs.py
python
Request.queue_times
(self)
return queue_times
Returns a list of queue times for all complete __tasks.
Returns a list of queue times for all complete __tasks.
[ "Returns", "a", "list", "of", "queue", "times", "for", "all", "complete", "__tasks", "." ]
def queue_times(self):
    """ Returns a list of queue times for all complete __tasks. """
    queue_times = []
    for task in self.__tasks.values():
        if task.complete():
            queue_times.append(task.queued_time())
    return queue_times
[ "def", "queue_times", "(", "self", ")", ":", "queue_times", "=", "[", "]", "for", "task", "in", "self", ".", "__tasks", ".", "values", "(", ")", ":", "if", "task", ".", "complete", "(", ")", ":", "queue_times", ".", "append", "(", "task", ".", "queued_time", "(", ")", ")", "return", "queue_times" ]
https://github.com/radlab/sparrow/blob/afb8efadeb88524f1394d1abe4ea66c6fd2ac744/src/main/python/parse_per_task_logs.py#L359-L365
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/ads/binary_sensor.py
python
AdsBinarySensor.__init__
(self, ads_hub, name, ads_var, device_class)
Initialize ADS binary sensor.
Initialize ADS binary sensor.
[ "Initialize", "ADS", "binary", "sensor", "." ]
def __init__(self, ads_hub, name, ads_var, device_class):
    """Initialize ADS binary sensor."""
    super().__init__(ads_hub, name, ads_var)
    self._attr_device_class = device_class or BinarySensorDeviceClass.MOVING
[ "def", "__init__", "(", "self", ",", "ads_hub", ",", "name", ",", "ads_var", ",", "device_class", ")", ":", "super", "(", ")", ".", "__init__", "(", "ads_hub", ",", "name", ",", "ads_var", ")", "self", ".", "_attr_device_class", "=", "device_class", "or", "BinarySensorDeviceClass", ".", "MOVING" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/ads/binary_sensor.py#L51-L54
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/selinux.py
python
port_delete_policy
(name, protocol=None, port=None)
return _port_add_or_delete_policy("delete", name, None, protocol, port, None)
.. versionadded:: 2019.2.0

Deletes the SELinux policy for a given protocol and port.

Returns the result of the call to semanage.

name
    The protocol and port spec. Can be formatted as ``(tcp|udp)/(port|port-range)``.

protocol
    The protocol for the port, ``tcp`` or ``udp``. Required if name is not formatted.

port
    The port or port range. Required if name is not formatted.

CLI Example:

.. code-block:: bash

    salt '*' selinux.port_delete_policy tcp/8080
    salt '*' selinux.port_delete_policy foobar protocol=tcp port=8091
.. versionadded:: 2019.2.0
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
def port_delete_policy(name, protocol=None, port=None):
    """
    .. versionadded:: 2019.2.0

    Deletes the SELinux policy for a given protocol and port.

    Returns the result of the call to semanage.

    name
        The protocol and port spec. Can be formatted as ``(tcp|udp)/(port|port-range)``.

    protocol
        The protocol for the port, ``tcp`` or ``udp``. Required if name is not formatted.

    port
        The port or port range. Required if name is not formatted.

    CLI Example:

    .. code-block:: bash

        salt '*' selinux.port_delete_policy tcp/8080
        salt '*' selinux.port_delete_policy foobar protocol=tcp port=8091
    """
    return _port_add_or_delete_policy("delete", name, None, protocol, port, None)
[ "def", "port_delete_policy", "(", "name", ",", "protocol", "=", "None", ",", "port", "=", "None", ")", ":", "return", "_port_add_or_delete_policy", "(", "\"delete\"", ",", "name", ",", "None", ",", "protocol", ",", "port", ",", "None", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/selinux.py#L805-L829
pyscf/pyscf
0adfb464333f5ceee07b664f291d4084801bae64
pyscf/pbc/scf/khf.py
python
analyze
(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN, **kwargs)
Analyze the given SCF object: print orbital energies, occupancies; print orbital coefficients; Mulliken population analysis; Dipole moment
Analyze the given SCF object: print orbital energies, occupancies; print orbital coefficients; Mulliken population analysis; Dipole moment
[ "Analyze", "the", "given", "SCF", "object", ":", "print", "orbital", "energies", "occupancies", ";", "print", "orbital", "coefficients", ";", "Mulliken", "population", "analysis", ";", "Dipole", "moment" ]
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN, **kwargs):
    '''Analyze the given SCF object: print orbital energies, occupancies;
    print orbital coefficients; Mulliken population analysis; Dipole moment
    '''
    mf.dump_scf_summary(verbose)

    mo_occ = mf.mo_occ
    mo_coeff = mf.mo_coeff
    ovlp_ao = mf.get_ovlp()
    dm = mf.make_rdm1(mo_coeff, mo_occ)
    if with_meta_lowdin:
        return mf.mulliken_meta(mf.cell, dm, s=ovlp_ao, verbose=verbose)
    else:
        raise NotImplementedError
[ "def", "analyze", "(", "mf", ",", "verbose", "=", "logger", ".", "DEBUG", ",", "with_meta_lowdin", "=", "WITH_META_LOWDIN", ",", "*", "*", "kwargs", ")", ":", "mf", ".", "dump_scf_summary", "(", "verbose", ")", "mo_occ", "=", "mf", ".", "mo_occ", "mo_coeff", "=", "mf", ".", "mo_coeff", "ovlp_ao", "=", "mf", ".", "get_ovlp", "(", ")", "dm", "=", "mf", ".", "make_rdm1", "(", "mo_coeff", ",", "mo_occ", ")", "if", "with_meta_lowdin", ":", "return", "mf", ".", "mulliken_meta", "(", "mf", ".", "cell", ",", "dm", ",", "s", "=", "ovlp_ao", ",", "verbose", "=", "verbose", ")", "else", ":", "raise", "NotImplementedError" ]
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/pbc/scf/khf.py#L280-L294
aneisch/home-assistant-config
86e381fde9609cb8871c439c433c12989e4e225d
custom_components/monitor_docker/switch.py
python
DockerContainerSwitch.event_callback
(self, name="", remove=False)
Callback for update of container information.
Callback for update of container information.
[ "Callback", "for", "update", "of", "container", "information", "." ]
def event_callback(self, name="", remove=False):
    """Callback for update of container information."""

    if remove:
        # If already called before, do not remove it again
        if self._removed:
            return

        _LOGGER.info("[%s] %s: Removing switch entity", self._instance, self._cname)
        self._loop.create_task(self.async_remove())
        self._removed = True
        return

    state = None

    try:
        info = self._container.get_info()
    except Exception as err:
        _LOGGER.error(
            "[%s] %s: Cannot request container info (%s)",
            self._instance,
            name,
            str(err),
        )
    else:
        if info is not None:
            state = info.get(CONTAINER_INFO_STATE) == "running"

    if state is not self._state:
        self._state = state
        self.async_schedule_update_ha_state()
[ "def", "event_callback", "(", "self", ",", "name", "=", "\"\"", ",", "remove", "=", "False", ")", ":", "if", "remove", ":", "# If already called before, do not remove it again", "if", "self", ".", "_removed", ":", "return", "_LOGGER", ".", "info", "(", "\"[%s] %s: Removing switch entity\"", ",", "self", ".", "_instance", ",", "self", ".", "_cname", ")", "self", ".", "_loop", ".", "create_task", "(", "self", ".", "async_remove", "(", ")", ")", "self", ".", "_removed", "=", "True", "return", "state", "=", "None", "try", ":", "info", "=", "self", ".", "_container", ".", "get_info", "(", ")", "except", "Exception", "as", "err", ":", "_LOGGER", ".", "error", "(", "\"[%s] %s: Cannot request container info (%s)\"", ",", "self", ".", "_instance", ",", "name", ",", "str", "(", "err", ")", ",", ")", "else", ":", "if", "info", "is", "not", "None", ":", "state", "=", "info", ".", "get", "(", "CONTAINER_INFO_STATE", ")", "==", "\"running\"", "if", "state", "is", "not", "self", ".", "_state", ":", "self", ".", "_state", "=", "state", "self", ".", "async_schedule_update_ha_state", "(", ")" ]
https://github.com/aneisch/home-assistant-config/blob/86e381fde9609cb8871c439c433c12989e4e225d/custom_components/monitor_docker/switch.py#L197-L227
HonglinChu/SiamTrackers
8471660b14f970578a43f077b28207d44a27e867
SiamFCpp/SiamFCpp-video_analyst/siamfcpp/utils/visualization.py
python
VideoWriter.__init__
(self, video_file, fps=25, scale=1.0)
:param video_file: path to write video. Perform nothing in case of None :param fps: frame per second :param scale: resize scale
[]
def __init__(self, video_file, fps=25, scale=1.0):
    """
    :param video_file: path to write video. Perform nothing in case of None
    :param fps: frame per second
    :param scale: resize scale
    """
    self.video_file = video_file
    self.fps = fps
    self.writer = None
    self.scale = scale
[ "def", "__init__", "(", "self", ",", "video_file", ",", "fps", "=", "25", ",", "scale", "=", "1.0", ")", ":", "self", ".", "video_file", "=", "video_file", "self", ".", "fps", "=", "fps", "self", ".", "writer", "=", "None", "self", ".", "scale", "=", "scale" ]
https://github.com/HonglinChu/SiamTrackers/blob/8471660b14f970578a43f077b28207d44a27e867/SiamFCpp/SiamFCpp-video_analyst/siamfcpp/utils/visualization.py#L17-L27
naftaliharris/tauthon
5587ceec329b75f7caf6d65a036db61ac1bae214
Lib/idlelib/CallTips.py
python
get_arg_text
(ob)
return argspec
Return a string describing the signature of a callable object, or ''. For Python-coded functions and methods, the first line is introspected. Delete 'self' parameter for classes (.__init__) and bound methods. The next lines are the first lines of the doc string up to the first empty line or _MAX_LINES. For builtins, this typically includes the arguments in addition to the return value.
Return a string describing the signature of a callable object, or ''.
[ "Return", "a", "string", "describing", "the", "signature", "of", "a", "callable", "object", "or", "." ]
def get_arg_text(ob):
    '''Return a string describing the signature of a callable object, or ''.

    For Python-coded functions and methods, the first line is introspected.
    Delete 'self' parameter for classes (.__init__) and bound methods.
    The next lines are the first lines of the doc string up to the first
    empty line or _MAX_LINES. For builtins, this typically includes
    the arguments in addition to the return value.
    '''
    argspec = ""
    try:
        ob_call = ob.__call__
    except BaseException:
        if type(ob) is types.ClassType:  # old-style
            ob_call = ob
        else:
            return argspec

    arg_offset = 0
    if type(ob) in (types.ClassType, types.TypeType):
        # Look for the first __init__ in the class chain with .im_func.
        # Slot wrappers (builtins, classes defined in funcs) do not.
        fob = _find_constructor(ob)
        if fob is None:
            fob = lambda: None
        else:
            arg_offset = 1
    elif type(ob) == types.MethodType:
        # bit of a hack for methods - turn it into a function
        # and drop the "self" param for bound methods
        fob = ob.im_func
        if ob.im_self is not None:
            arg_offset = 1
    elif type(ob_call) == types.MethodType:
        # a callable class instance
        fob = ob_call.im_func
        arg_offset = 1
    else:
        fob = ob

    # Try to build one for Python defined functions
    if type(fob) in [types.FunctionType, types.LambdaType]:
        argcount = fob.func_code.co_argcount
        real_args = fob.func_code.co_varnames[arg_offset:argcount]
        defaults = fob.func_defaults or []
        defaults = list(map(lambda name: "=%s" % repr(name), defaults))
        defaults = [""] * (len(real_args) - len(defaults)) + defaults
        items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
        for flag, pre, name in ((0x4, '*', 'args'), (0x8, '**', 'kwargs')):
            if fob.func_code.co_flags & flag:
                pre_name = pre + name
                if name not in real_args:
                    items.append(pre_name)
                else:
                    i = 1
                    while ((name + '%s') % i) in real_args:
                        i += 1
                    items.append((pre_name + '%s') % i)
        argspec = ", ".join(items)
        argspec = "(%s)" % re.sub("(?<!\d)\.\d+", "<tuple>", argspec)

    lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT)
             if len(argspec) > _MAX_COLS else [argspec] if argspec else [])

    if isinstance(ob_call, types.MethodType):
        doc = ob_call.__doc__
    else:
        doc = getattr(ob, "__doc__", "")
    if doc:
        for line in doc.split('\n', _MAX_LINES)[:_MAX_LINES]:
            line = line.strip()
            if not line:
                break
            if len(line) > _MAX_COLS:
                line = line[: _MAX_COLS - 3] + '...'
            lines.append(line)
    argspec = '\n'.join(lines)
    return argspec
[ "def", "get_arg_text", "(", "ob", ")", ":", "argspec", "=", "\"\"", "try", ":", "ob_call", "=", "ob", ".", "__call__", "except", "BaseException", ":", "if", "type", "(", "ob", ")", "is", "types", ".", "ClassType", ":", "# old-style", "ob_call", "=", "ob", "else", ":", "return", "argspec", "arg_offset", "=", "0", "if", "type", "(", "ob", ")", "in", "(", "types", ".", "ClassType", ",", "types", ".", "TypeType", ")", ":", "# Look for the first __init__ in the class chain with .im_func.", "# Slot wrappers (builtins, classes defined in funcs) do not.", "fob", "=", "_find_constructor", "(", "ob", ")", "if", "fob", "is", "None", ":", "fob", "=", "lambda", ":", "None", "else", ":", "arg_offset", "=", "1", "elif", "type", "(", "ob", ")", "==", "types", ".", "MethodType", ":", "# bit of a hack for methods - turn it into a function", "# and drop the \"self\" param for bound methods", "fob", "=", "ob", ".", "im_func", "if", "ob", ".", "im_self", "is", "not", "None", ":", "arg_offset", "=", "1", "elif", "type", "(", "ob_call", ")", "==", "types", ".", "MethodType", ":", "# a callable class instance", "fob", "=", "ob_call", ".", "im_func", "arg_offset", "=", "1", "else", ":", "fob", "=", "ob", "# Try to build one for Python defined functions", "if", "type", "(", "fob", ")", "in", "[", "types", ".", "FunctionType", ",", "types", ".", "LambdaType", "]", ":", "argcount", "=", "fob", ".", "func_code", ".", "co_argcount", "real_args", "=", "fob", ".", "func_code", ".", "co_varnames", "[", "arg_offset", ":", "argcount", "]", "defaults", "=", "fob", ".", "func_defaults", "or", "[", "]", "defaults", "=", "list", "(", "map", "(", "lambda", "name", ":", "\"=%s\"", "%", "repr", "(", "name", ")", ",", "defaults", ")", ")", "defaults", "=", "[", "\"\"", "]", "*", "(", "len", "(", "real_args", ")", "-", "len", "(", "defaults", ")", ")", "+", "defaults", "items", "=", "map", "(", "lambda", "arg", ",", "dflt", ":", "arg", "+", "dflt", ",", "real_args", ",", "defaults", ")", "for", "flag", ",", "pre", ",", "name", "in", "(", "(", "0x4", ",", "'*'", ",", "'args'", ")", ",", "(", "0x8", ",", "'**'", ",", "'kwargs'", ")", ")", ":", "if", "fob", ".", "func_code", ".", "co_flags", "&", "flag", ":", "pre_name", "=", "pre", "+", "name", "if", "name", "not", "in", "real_args", ":", "items", ".", "append", "(", "pre_name", ")", "else", ":", "i", "=", "1", "while", "(", "(", "name", "+", "'%s'", ")", "%", "i", ")", "in", "real_args", ":", "i", "+=", "1", "items", ".", "append", "(", "(", "pre_name", "+", "'%s'", ")", "%", "i", ")", "argspec", "=", "\", \"", ".", "join", "(", "items", ")", "argspec", "=", "\"(%s)\"", "%", "re", ".", "sub", "(", "\"(?<!\\d)\\.\\d+\"", ",", "\"<tuple>\"", ",", "argspec", ")", "lines", "=", "(", "textwrap", ".", "wrap", "(", "argspec", ",", "_MAX_COLS", ",", "subsequent_indent", "=", "_INDENT", ")", "if", "len", "(", "argspec", ")", ">", "_MAX_COLS", "else", "[", "argspec", "]", "if", "argspec", "else", "[", "]", ")", "if", "isinstance", "(", "ob_call", ",", "types", ".", "MethodType", ")", ":", "doc", "=", "ob_call", ".", "__doc__", "else", ":", "doc", "=", "getattr", "(", "ob", ",", "\"__doc__\"", ",", "\"\"", ")", "if", "doc", ":", "for", "line", "in", "doc", ".", "split", "(", "'\\n'", ",", "_MAX_LINES", ")", "[", ":", "_MAX_LINES", "]", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "break", "if", "len", "(", "line", ")", ">", "_MAX_COLS", ":", "line", "=", "line", "[", ":", "_MAX_COLS", "-", "3", "]", "+", "'...'", "lines", ".", "append", "(", "line", ")", 
"argspec", "=", "'\\n'", ".", "join", "(", "lines", ")", "return", "argspec" ]
https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Lib/idlelib/CallTips.py#L139-L215
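get_arg_text above targets Python 2 (types.ClassType, im_func, func_code). For comparison only, and not part of the Tauthon source, here is a minimal Python 3 sketch of the same idea using the standard inspect module:

# Hedged Python 3 counterpart (not from the repository above): the stdlib
# inspect module replaces the manual func_code/im_func introspection.
import inspect

def get_arg_text3(ob):
    """Return '(...)' signature text for a callable, or '' if unavailable."""
    try:
        return str(inspect.signature(ob))  # e.g. '(a, b=1, *args, **kwargs)'
    except (TypeError, ValueError):
        # non-callables and builtins without signature metadata land here
        return ''

print(get_arg_text3(get_arg_text3))  # -> '(ob)'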
hexway/apple_bleee
1f8022959be660b561e6004b808dd93fa252bc90
npyscreen/wgwidget.py
python
InputHandler.set_up_handlers
(self)
This function should be called somewhere during object initialisation (which all library-defined widgets do). You might like to override this in your own definition, but in most cases the add_handers or add_complex_handlers methods are what you want.
This function should be called somewhere during object initialisation (which all library-defined widgets do). You might like to override this in your own definition, but in most cases the add_handers or add_complex_handlers methods are what you want.
[ "This", "function", "should", "be", "called", "somewhere", "during", "object", "initialisation", "(", "which", "all", "library", "-", "defined", "widgets", "do", ")", ".", "You", "might", "like", "to", "override", "this", "in", "your", "own", "definition", "but", "in", "most", "cases", "the", "add_handers", "or", "add_complex_handlers", "methods", "are", "what", "you", "want", "." ]
def set_up_handlers(self):
    """This function should be called somewhere during object initialisation
    (which all library-defined widgets do). You might like to override this in
    your own definition, but in most cases the add_handers or
    add_complex_handlers methods are what you want."""
    #called in __init__
    self.handlers = {
        curses.ascii.NL:   self.h_exit_down,
        curses.ascii.CR:   self.h_exit_down,
        curses.ascii.TAB:  self.h_exit_down,
        curses.KEY_BTAB:   self.h_exit_up,
        curses.KEY_DOWN:   self.h_exit_down,
        curses.KEY_UP:     self.h_exit_up,
        curses.KEY_LEFT:   self.h_exit_left,
        curses.KEY_RIGHT:  self.h_exit_right,
        # "^P":            self.h_exit_up,
        # "^N":            self.h_exit_down,
        curses.ascii.ESC:  self.h_exit_escape,
        curses.KEY_MOUSE:  self.h_exit_mouse,
    }
    self.complex_handlers = []
[ "def", "set_up_handlers", "(", "self", ")", ":", "#called in __init__", "self", ".", "handlers", "=", "{", "curses", ".", "ascii", ".", "NL", ":", "self", ".", "h_exit_down", ",", "curses", ".", "ascii", ".", "CR", ":", "self", ".", "h_exit_down", ",", "curses", ".", "ascii", ".", "TAB", ":", "self", ".", "h_exit_down", ",", "curses", ".", "KEY_BTAB", ":", "self", ".", "h_exit_up", ",", "curses", ".", "KEY_DOWN", ":", "self", ".", "h_exit_down", ",", "curses", ".", "KEY_UP", ":", "self", ".", "h_exit_up", ",", "curses", ".", "KEY_LEFT", ":", "self", ".", "h_exit_left", ",", "curses", ".", "KEY_RIGHT", ":", "self", ".", "h_exit_right", ",", "# \"^P\": self.h_exit_up,", "# \"^N\": self.h_exit_down,", "curses", ".", "ascii", ".", "ESC", ":", "self", ".", "h_exit_escape", ",", "curses", ".", "KEY_MOUSE", ":", "self", ".", "h_exit_mouse", ",", "}", "self", ".", "complex_handlers", "=", "[", "]" ]
https://github.com/hexway/apple_bleee/blob/1f8022959be660b561e6004b808dd93fa252bc90/npyscreen/wgwidget.py#L103-L122
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/urllib/request.py
python
URLopener.open_http
(self, url, data=None)
return self._open_generic_http(http.client.HTTPConnection, url, data)
Use HTTP protocol.
Use HTTP protocol.
[ "Use", "HTTP", "protocol", "." ]
def open_http(self, url, data=None):
    """Use HTTP protocol."""
    return self._open_generic_http(http.client.HTTPConnection, url, data)
[ "def", "open_http", "(", "self", ",", "url", ",", "data", "=", "None", ")", ":", "return", "self", ".", "_open_generic_http", "(", "http", ".", "client", ".", "HTTPConnection", ",", "url", ",", "data", ")" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/urllib/request.py#L1861-L1863
freewym/espresso
6671c507350295269e38add57dbe601dcb8e6ecf
fairseq/models/roberta/model.py
python
RobertaEncoder.forward
( self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused, )
return x, extra
Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: tuple: - the LM output of shape `(batch, src_len, vocab)` - a dictionary of additional data, where 'inner_states' is a list of hidden states. Note that the hidden states have shape `(src_len, batch, vocab)`.
Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False).
[ "Args", ":", "src_tokens", "(", "LongTensor", ")", ":", "input", "tokens", "of", "shape", "(", "batch", "src_len", ")", "features_only", "(", "bool", "optional", ")", ":", "skip", "LM", "head", "and", "just", "return", "features", ".", "If", "True", "the", "output", "will", "be", "of", "shape", "(", "batch", "src_len", "embed_dim", ")", ".", "return_all_hiddens", "(", "bool", "optional", ")", ":", "also", "return", "all", "of", "the", "intermediate", "hidden", "states", "(", "default", ":", "False", ")", "." ]
def forward(
    self,
    src_tokens,
    features_only=False,
    return_all_hiddens=False,
    masked_tokens=None,
    **unused,
):
    """
    Args:
        src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
        features_only (bool, optional): skip LM head and just return
            features. If True, the output will be of shape
            `(batch, src_len, embed_dim)`.
        return_all_hiddens (bool, optional): also return all of the
            intermediate hidden states (default: False).

    Returns:
        tuple:
            - the LM output of shape `(batch, src_len, vocab)`
            - a dictionary of additional data, where 'inner_states'
              is a list of hidden states. Note that the hidden
              states have shape `(src_len, batch, vocab)`.
    """
    x, extra = self.extract_features(
        src_tokens, return_all_hiddens=return_all_hiddens
    )
    if not features_only:
        x = self.output_layer(x, masked_tokens=masked_tokens)
    return x, extra
[ "def", "forward", "(", "self", ",", "src_tokens", ",", "features_only", "=", "False", ",", "return_all_hiddens", "=", "False", ",", "masked_tokens", "=", "None", ",", "*", "*", "unused", ",", ")", ":", "x", ",", "extra", "=", "self", ".", "extract_features", "(", "src_tokens", ",", "return_all_hiddens", "=", "return_all_hiddens", ")", "if", "not", "features_only", ":", "x", "=", "self", ".", "output_layer", "(", "x", ",", "masked_tokens", "=", "masked_tokens", ")", "return", "x", ",", "extra" ]
https://github.com/freewym/espresso/blob/6671c507350295269e38add57dbe601dcb8e6ecf/fairseq/models/roberta/model.py#L558-L587
guildai/guildai
1665985a3d4d788efc1a3180ca51cc417f71ca78
guild/external/pip/_vendor/requests/models.py
python
PreparedRequest.prepare_hooks
(self, hooks)
Prepares the given hooks.
Prepares the given hooks.
[ "Prepares", "the", "given", "hooks", "." ]
def prepare_hooks(self, hooks):
    """Prepares the given hooks."""
    # hooks can be passed as None to the prepare method and to this
    # method. To prevent iterating over None, simply use an empty list
    # if hooks is False-y
    hooks = hooks or []
    for event in hooks:
        self.register_hook(event, hooks[event])
[ "def", "prepare_hooks", "(", "self", ",", "hooks", ")", ":", "# hooks can be passed as None to the prepare method and to this", "# method. To prevent iterating over None, simply use an empty list", "# if hooks is False-y", "hooks", "=", "hooks", "or", "[", "]", "for", "event", "in", "hooks", ":", "self", ".", "register_hook", "(", "event", ",", "hooks", "[", "event", "]", ")" ]
https://github.com/guildai/guildai/blob/1665985a3d4d788efc1a3180ca51cc417f71ca78/guild/external/pip/_vendor/requests/models.py#L572-L579
fluentpython/example-code-2e
80f7f84274a47579e59c29a4657691525152c9d5
12-seq-hacking/vector_v2.py
python
Vector.__len__
(self)
return len(self._components)
[]
def __len__(self):
    return len(self._components)
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "_components", ")" ]
https://github.com/fluentpython/example-code-2e/blob/80f7f84274a47579e59c29a4657691525152c9d5/12-seq-hacking/vector_v2.py#L149-L150
proycon/pynlpl
7707f69a91caaa6cde037f0d0379f1d42500a68b
pynlpl/statistics.py
python
FrequencyList.__contains__
(self, type)
return type in self._count
Checks if the specified type is in the frequency list
Checks if the specified type is in the frequency list
[ "Checks", "if", "the", "specified", "type", "is", "in", "the", "frequency", "list" ]
def __contains__(self, type):
    """Checks if the specified type is in the frequency list"""
    if self.dovalidation:
        type = self._validate(type)
    return type in self._count
[ "def", "__contains__", "(", "self", ",", "type", ")", ":", "if", "self", ".", "dovalidation", ":", "type", "=", "self", ".", "_validate", "(", "type", ")", "return", "type", "in", "self", ".", "_count" ]
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/statistics.py#L167-L170
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/packages/site-packages/sqlalchemy/orm/interfaces.py
python
MapperOption.process_query_conditionally
(self, query)
same as process_query(), except that this option may not apply to the given query. This is typically used during a lazy load or scalar refresh operation to propagate options stated in the original Query to the new Query being used for the load. It occurs for those options that specify propagate_to_loaders=True.
same as process_query(), except that this option may not apply to the given query.
[ "same", "as", "process_query", "()", "except", "that", "this", "option", "may", "not", "apply", "to", "the", "given", "query", "." ]
def process_query_conditionally(self, query):
    """same as process_query(), except that this option may not
    apply to the given query.

    This is typically used during a lazy load or scalar refresh
    operation to propagate options stated in the original Query to the
    new Query being used for the load. It occurs for those options that
    specify propagate_to_loaders=True.
    """
    self.process_query(query)
[ "def", "process_query_conditionally", "(", "self", ",", "query", ")", ":", "self", ".", "process_query", "(", "query", ")" ]
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/packages/site-packages/sqlalchemy/orm/interfaces.py#L568-L579
bytedance/byteps
d0bcf1a87ee87539ceb29bcc976d4da063ffc47b
byteps/torch/compression.py
python
Compressor.decompress
(tensor, ctx)
Decompress the tensor with the given context.
Decompress the tensor with the given context.
[ "Decompress", "the", "tensor", "with", "the", "given", "context", "." ]
def decompress(tensor, ctx):
    """Decompress the tensor with the given context."""
    pass
[ "def", "decompress", "(", "tensor", ",", "ctx", ")", ":", "pass" ]
https://github.com/bytedance/byteps/blob/d0bcf1a87ee87539ceb29bcc976d4da063ffc47b/byteps/torch/compression.py#L29-L31
lovelylain/pyctp
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
futures/ctp/ApiStruct.py
python
ReqOpenAccount.__init__
(self, TradeCode='', BankID='', BankBranchID='', BrokerID='', BrokerBranchID='', TradeDate='', TradeTime='', BankSerial='', TradingDay='', PlateSerial=0, LastFragment=LF_Yes, SessionID=0, CustomerName='', IdCardType=ICT_EID, IdentifiedCardNo='', Gender=GD_Unknown, CountryCode='', CustType=CUSTT_Person, Address='', ZipCode='', Telephone='', MobilePhone='', Fax='', EMail='', MoneyAccountStatus=MAS_Normal, BankAccount='', BankPassWord='', AccountID='', Password='', InstallID=0, VerifyCertNoFlag=YNI_Yes, CurrencyID='', CashExchangeCode=CEC_Exchange, Digest='', BankAccType=BAT_BankBook, DeviceID='', BankSecuAccType=BAT_BankBook, BrokerIDByBank='', BankSecuAcc='', BankPwdFlag=BPWDF_NoCheck, SecuPwdFlag=BPWDF_NoCheck, OperNo='', TID=0, UserID='')
[]
def __init__(self, TradeCode='', BankID='', BankBranchID='', BrokerID='', BrokerBranchID='', TradeDate='', TradeTime='', BankSerial='', TradingDay='', PlateSerial=0, LastFragment=LF_Yes, SessionID=0, CustomerName='', IdCardType=ICT_EID, IdentifiedCardNo='', Gender=GD_Unknown, CountryCode='', CustType=CUSTT_Person, Address='', ZipCode='', Telephone='', MobilePhone='', Fax='', EMail='', MoneyAccountStatus=MAS_Normal, BankAccount='', BankPassWord='', AccountID='', Password='', InstallID=0, VerifyCertNoFlag=YNI_Yes, CurrencyID='', CashExchangeCode=CEC_Exchange, Digest='', BankAccType=BAT_BankBook, DeviceID='', BankSecuAccType=BAT_BankBook, BrokerIDByBank='', BankSecuAcc='', BankPwdFlag=BPWDF_NoCheck, SecuPwdFlag=BPWDF_NoCheck, OperNo='', TID=0, UserID=''):
    self.TradeCode = ''  # business function code, char[7]
    self.BankID = ''  # bank code, char[4]
    self.BankBranchID = 'BankBrchID'  # bank branch code, char[5]
    self.BrokerID = ''  # futures broker code, char[11]
    self.BrokerBranchID = 'FutureBranchID'  # futures broker branch code, char[31]
    self.TradeDate = ''  # trade date, char[9]
    self.TradeTime = ''  # trade time, char[9]
    self.BankSerial = ''  # bank serial number, char[13]
    self.TradingDay = 'TradeDate'  # trading system date, char[9]
    self.PlateSerial = 'Serial'  # bank-futures platform message serial number, int
    self.LastFragment = ''  # last fragment flag, char
    self.SessionID = ''  # session ID, int
    self.CustomerName = 'IndividualName'  # customer name, char[51]
    self.IdCardType = ''  # ID document type, char
    self.IdentifiedCardNo = ''  # ID document number, char[51]
    self.Gender = ''  # gender, char
    self.CountryCode = ''  # country code, char[21]
    self.CustType = ''  # customer type, char
    self.Address = ''  # address, char[101]
    self.ZipCode = ''  # zip code, char[7]
    self.Telephone = ''  # telephone number, char[41]
    self.MobilePhone = ''  # mobile phone, char[21]
    self.Fax = ''  # fax, char[41]
    self.EMail = ''  # e-mail, char[41]
    self.MoneyAccountStatus = ''  # money account status, char
    self.BankAccount = ''  # bank account, char[41]
    self.BankPassWord = 'Password'  # bank password, char[41]
    self.AccountID = ''  # investor account ID, char[13]
    self.Password = ''  # futures password, char[41]
    self.InstallID = ''  # install ID, int
    self.VerifyCertNoFlag = 'YesNoIndicator'  # verify customer ID number flag, char
    self.CurrencyID = ''  # currency code, char[4]
    self.CashExchangeCode = ''  # cash/remittance flag, char
    self.Digest = ''  # digest, char[36]
    self.BankAccType = ''  # bank account type, char
    self.DeviceID = ''  # channel flag, char[3]
    self.BankSecuAccType = 'BankAccType'  # futures unit account type, char
    self.BrokerIDByBank = 'BankCodingForFuture'  # futures company's bank coding, char[33]
    self.BankSecuAcc = 'BankAccount'  # futures unit account, char[41]
    self.BankPwdFlag = 'PwdFlag'  # bank password flag, char
    self.SecuPwdFlag = 'PwdFlag'  # futures fund password check flag, char
    self.OperNo = ''  # teller number, char[17]
    self.TID = ''  # transaction ID, int
    self.UserID = ''
[ "def", "__init__", "(", "self", ",", "TradeCode", "=", "''", ",", "BankID", "=", "''", ",", "BankBranchID", "=", "''", ",", "BrokerID", "=", "''", ",", "BrokerBranchID", "=", "''", ",", "TradeDate", "=", "''", ",", "TradeTime", "=", "''", ",", "BankSerial", "=", "''", ",", "TradingDay", "=", "''", ",", "PlateSerial", "=", "0", ",", "LastFragment", "=", "LF_Yes", ",", "SessionID", "=", "0", ",", "CustomerName", "=", "''", ",", "IdCardType", "=", "ICT_EID", ",", "IdentifiedCardNo", "=", "''", ",", "Gender", "=", "GD_Unknown", ",", "CountryCode", "=", "''", ",", "CustType", "=", "CUSTT_Person", ",", "Address", "=", "''", ",", "ZipCode", "=", "''", ",", "Telephone", "=", "''", ",", "MobilePhone", "=", "''", ",", "Fax", "=", "''", ",", "EMail", "=", "''", ",", "MoneyAccountStatus", "=", "MAS_Normal", ",", "BankAccount", "=", "''", ",", "BankPassWord", "=", "''", ",", "AccountID", "=", "''", ",", "Password", "=", "''", ",", "InstallID", "=", "0", ",", "VerifyCertNoFlag", "=", "YNI_Yes", ",", "CurrencyID", "=", "''", ",", "CashExchangeCode", "=", "CEC_Exchange", ",", "Digest", "=", "''", ",", "BankAccType", "=", "BAT_BankBook", ",", "DeviceID", "=", "''", ",", "BankSecuAccType", "=", "BAT_BankBook", ",", "BrokerIDByBank", "=", "''", ",", "BankSecuAcc", "=", "''", ",", "BankPwdFlag", "=", "BPWDF_NoCheck", ",", "SecuPwdFlag", "=", "BPWDF_NoCheck", ",", "OperNo", "=", "''", ",", "TID", "=", "0", ",", "UserID", "=", "''", ")", ":", "self", ".", "TradeCode", "=", "''", "#业务功能码, char[7]", "self", ".", "BankID", "=", "''", "#银行代码, char[4]", "self", ".", "BankBranchID", "=", "'BankBrchID'", "#银行分支机构代码, char[5]", "self", ".", "BrokerID", "=", "''", "#期商代码, char[11]", "self", ".", "BrokerBranchID", "=", "'FutureBranchID'", "#期商分支机构代码, char[31]", "self", ".", "TradeDate", "=", "''", "#交易日期, char[9]", "self", ".", "TradeTime", "=", "''", "#交易时间, char[9]", "self", ".", "BankSerial", "=", "''", "#银行流水号, char[13]", "self", ".", "TradingDay", "=", "'TradeDate'", "#交易系统日期 , char[9]", "self", ".", "PlateSerial", "=", "'Serial'", "#银期平台消息流水号, int", "self", ".", "LastFragment", "=", "''", "#最后分片标志, char", "self", ".", "SessionID", "=", "''", "#会话号, int", "self", ".", "CustomerName", "=", "'IndividualName'", "#客户姓名, char[51]", "self", ".", "IdCardType", "=", "''", "#证件类型, char", "self", ".", "IdentifiedCardNo", "=", "''", "#证件号码, char[51]", "self", ".", "Gender", "=", "''", "#性别, char", "self", ".", "CountryCode", "=", "''", "#国家代码, char[21]", "self", ".", "CustType", "=", "''", "#客户类型, char", "self", ".", "Address", "=", "''", "#地址, char[101]", "self", ".", "ZipCode", "=", "''", "#邮编, char[7]", "self", ".", "Telephone", "=", "''", "#电话号码, char[41]", "self", ".", "MobilePhone", "=", "''", "#手机, char[21]", "self", ".", "Fax", "=", "''", "#传真, char[41]", "self", ".", "EMail", "=", "''", "#电子邮件, char[41]", "self", ".", "MoneyAccountStatus", "=", "''", "#资金账户状态, char", "self", ".", "BankAccount", "=", "''", "#银行帐号, char[41]", "self", ".", "BankPassWord", "=", "'Password'", "#银行密码, char[41]", "self", ".", "AccountID", "=", "''", "#投资者帐号, char[13]", "self", ".", "Password", "=", "''", "#期货密码, char[41]", "self", ".", "InstallID", "=", "''", "#安装编号, int", "self", ".", "VerifyCertNoFlag", "=", "'YesNoIndicator'", "#验证客户证件号码标志, char", "self", ".", "CurrencyID", "=", "''", "#币种代码, char[4]", "self", ".", "CashExchangeCode", "=", "''", "#汇钞标志, char", "self", ".", "Digest", "=", "''", "#摘要, char[36]", "self", ".", "BankAccType", "=", "''", "#银行帐号类型, char", "self", ".", "DeviceID", "=", "''", "#渠道标志, char[3]", "self", ".", 
"BankSecuAccType", "=", "'BankAccType'", "#期货单位帐号类型, char", "self", ".", "BrokerIDByBank", "=", "'BankCodingForFuture'", "#期货公司银行编码, char[33]", "self", ".", "BankSecuAcc", "=", "'BankAccount'", "#期货单位帐号, char[41]", "self", ".", "BankPwdFlag", "=", "'PwdFlag'", "#银行密码标志, char", "self", ".", "SecuPwdFlag", "=", "'PwdFlag'", "#期货资金密码核对标志, char", "self", ".", "OperNo", "=", "''", "#交易柜员, char[17]", "self", ".", "TID", "=", "''", "#交易ID, int", "self", ".", "UserID", "=", "''" ]
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/futures/ctp/ApiStruct.py#L4757-L4801
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/conversations/v1/user/__init__.py
python
UserInstance.friendly_name
(self)
return self._properties['friendly_name']
:returns: The string that you assigned to describe the resource :rtype: unicode
:returns: The string that you assigned to describe the resource :rtype: unicode
[ ":", "returns", ":", "The", "string", "that", "you", "assigned", "to", "describe", "the", "resource", ":", "rtype", ":", "unicode" ]
def friendly_name(self):
    """
    :returns: The string that you assigned to describe the resource
    :rtype: unicode
    """
    return self._properties['friendly_name']
[ "def", "friendly_name", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'friendly_name'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/conversations/v1/user/__init__.py#L386-L391
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_internal/utils/misc.py
python
renames
(old, new)
Like os.renames(), but handles renaming across devices.
Like os.renames(), but handles renaming across devices.
[ "Like", "os", ".", "renames", "()", "but", "handles", "renaming", "across", "devices", "." ]
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames().
    head, tail = os.path.split(new)
    if head and tail and not os.path.exists(head):
        os.makedirs(head)

    shutil.move(old, new)

    head, tail = os.path.split(old)
    if head and tail:
        try:
            os.removedirs(head)
        except OSError:
            pass
[ "def", "renames", "(", "old", ",", "new", ")", ":", "# Implementation borrowed from os.renames().", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "new", ")", "if", "head", "and", "tail", "and", "not", "os", ".", "path", ".", "exists", "(", "head", ")", ":", "os", ".", "makedirs", "(", "head", ")", "shutil", ".", "move", "(", "old", ",", "new", ")", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "old", ")", "if", "head", "and", "tail", ":", "try", ":", "os", ".", "removedirs", "(", "head", ")", "except", "OSError", ":", "pass" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_internal/utils/misc.py#L269-L283
kanzure/nanoengineer
874e4c9f8a9190f093625b267f9767e19f82e6c4
cad/src/commands/BuildCrystal/CrystalShape.py
python
CrystalShape._updateBBox
(self, curveList)
Recompute the bounding box for the list of curves
Recompute the bounding box for the list of curves
[ "Recompute", "the", "bounding", "box", "for", "the", "list", "of", "curves" ]
def _updateBBox(self, curveList):
    """
    Recompute the bounding box for the list of curves
    """
    bbox = BBox()
    for c in curveList[1:]:
        bbox.merge(c.bbox)
    curveList[0] = bbox
[ "def", "_updateBBox", "(", "self", ",", "curveList", ")", ":", "bbox", "=", "BBox", "(", ")", "for", "c", "in", "curveList", "[", "1", ":", "]", ":", "bbox", ".", "merge", "(", "c", ".", "bbox", ")", "curveList", "[", "0", "]", "=", "bbox" ]
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/commands/BuildCrystal/CrystalShape.py#L213-L220
billpmurphy/hask
4609cc8d9d975f51b6ecdbd33640cdffdc28f953
hask/Prelude.py
python
sequence_
(xs)
sequence_ :: Monad m => [m a] -> m None Evaluate each action in the sequence from left to right, and ignore the results.
sequence_ :: Monad m => [m a] -> m None
[ "sequence_", "::", "Monad", "m", "=", ">", "[", "m", "a", "]", "-", ">", "m", "None" ]
def sequence_(xs):
    """
    sequence_ :: Monad m => [m a] -> m None

    Evaluate each action in the sequence from left to right, and ignore the
    results.
    """
    raise NotImplementedError()
[ "def", "sequence_", "(", "xs", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/billpmurphy/hask/blob/4609cc8d9d975f51b6ecdbd33640cdffdc28f953/hask/Prelude.py#L209-L216
minio/minio-py
b3ba3bf99fe6b9ff2b28855550d6ab5345c134e3
minio/minioadmin.py
python
MinioAdmin.kms_key_create
(self, key=None)
return self._run( [ "kms", "key", "create", self._target, key ] + ([key] if key else []), )
Create a new KMS master key.
Create a new KMS master key.
[ "Create", "a", "new", "KMS", "master", "key", "." ]
def kms_key_create(self, key=None):
    """Create a new KMS master key."""
    return self._run(
        ["kms", "key", "create", self._target, key] + ([key] if key else []),
    )
[ "def", "kms_key_create", "(", "self", ",", "key", "=", "None", ")", ":", "return", "self", ".", "_run", "(", "[", "\"kms\"", ",", "\"key\"", ",", "\"create\"", ",", "self", ".", "_target", ",", "key", "]", "+", "(", "[", "key", "]", "if", "key", "else", "[", "]", ")", ",", ")" ]
https://github.com/minio/minio-py/blob/b3ba3bf99fe6b9ff2b28855550d6ab5345c134e3/minio/minioadmin.py#L206-L212
python-social-auth/social-core
1ea27e8989657bb35dd37b6ee2e038e1358fbc96
social_core/backends/douban.py
python
DoubanOAuth2.user_data
(self, access_token, *args, **kwargs)
return self.get_json( 'https://api.douban.com/v2/user/~me', headers={'Authorization': f'Bearer {access_token}'} )
Return user data provided
Return user data provided
[ "Return", "user", "data", "provided" ]
def user_data(self, access_token, *args, **kwargs):
    """Return user data provided"""
    return self.get_json(
        'https://api.douban.com/v2/user/~me',
        headers={'Authorization': f'Bearer {access_token}'}
    )
[ "def", "user_data", "(", "self", ",", "access_token", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "get_json", "(", "'https://api.douban.com/v2/user/~me'", ",", "headers", "=", "{", "'Authorization'", ":", "f'Bearer {access_token}'", "}", ")" ]
https://github.com/python-social-auth/social-core/blob/1ea27e8989657bb35dd37b6ee2e038e1358fbc96/social_core/backends/douban.py#L54-L59
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/xmlrpc/client.py
python
Marshaller.dump_int
(self, value, write)
[]
def dump_int(self, value, write):
    # in case ints are > 32 bits
    if value > MAXINT or value < MININT:
        raise OverflowError("int exceeds XML-RPC limits")
    write("<value><int>")
    write(str(value))
    write("</int></value>\n")
[ "def", "dump_int", "(", "self", ",", "value", ",", "write", ")", ":", "# in case ints are > 32 bits", "if", "value", ">", "MAXINT", "or", "value", "<", "MININT", ":", "raise", "OverflowError", "(", "\"int exceeds XML-RPC limits\"", ")", "write", "(", "\"<value><int>\"", ")", "write", "(", "str", "(", "value", ")", ")", "write", "(", "\"</int></value>\\n\"", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/xmlrpc/client.py#L525-L531
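The guard in dump_int above enforces XML-RPC's 32-bit integer range; in the stdlib, MAXINT is 2**31 - 1 and MININT is -2**31, exported by xmlrpc.client. A small sketch of the same limit exercised through the module's public dumps() helper:

# Sketch of the 32-bit bound enforced above, via the stdlib's public helper.
import xmlrpc.client

print(xmlrpc.client.MAXINT)  # 2147483647, i.e. 2**31 - 1
print(xmlrpc.client.dumps((xmlrpc.client.MAXINT,)))  # serializes fine
try:
    xmlrpc.client.dumps((xmlrpc.client.MAXINT + 1,))
except OverflowError as err:
    print(err)  # int exceeds XML-RPC limits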
theotherp/nzbhydra
4b03d7f769384b97dfc60dade4806c0fc987514e
libs/requests/packages/urllib3/response.py
python
HTTPResponse._init_length
(self, request_method)
return length
Set initial length value for Response content if available.
Set initial length value for Response content if available.
[ "Set", "initial", "length", "value", "for", "Response", "content", "if", "available", "." ]
def _init_length(self, request_method):
    """
    Set initial length value for Response content if available.
    """
    length = self.headers.get('content-length')

    if length is not None and self.chunked:
        # This Response will fail with an IncompleteRead if it can't be
        # received as chunked. This method falls back to attempt reading
        # the response before raising an exception.
        log.warning("Received response with both Content-Length and "
                    "Transfer-Encoding set. This is expressly forbidden "
                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                    "attempting to process response as Transfer-Encoding: "
                    "chunked.")
        return None

    elif length is not None:
        try:
            # RFC 7230 section 3.3.2 specifies multiple content lengths can
            # be sent in a single Content-Length header
            # (e.g. Content-Length: 42, 42). This line ensures the values
            # are all valid ints and that as long as the `set` length is 1,
            # all values are the same. Otherwise, the header is invalid.
            lengths = set([int(val) for val in length.split(',')])
            if len(lengths) > 1:
                raise InvalidHeader("Content-Length contained multiple "
                                    "unmatching values (%s)" % length)
            length = lengths.pop()
        except ValueError:
            length = None
        else:
            if length < 0:
                length = None

    # Convert status to int for comparison
    # In some cases, httplib returns a status of "_UNKNOWN"
    try:
        status = int(self.status)
    except ValueError:
        status = 0

    # Check for responses that shouldn't include a body
    if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
        length = 0

    return length
[ "def", "_init_length", "(", "self", ",", "request_method", ")", ":", "length", "=", "self", ".", "headers", ".", "get", "(", "'content-length'", ")", "if", "length", "is", "not", "None", "and", "self", ".", "chunked", ":", "# This Response will fail with an IncompleteRead if it can't be", "# received as chunked. This method falls back to attempt reading", "# the response before raising an exception.", "log", ".", "warning", "(", "\"Received response with both Content-Length and \"", "\"Transfer-Encoding set. This is expressly forbidden \"", "\"by RFC 7230 sec 3.3.2. Ignoring Content-Length and \"", "\"attempting to process response as Transfer-Encoding: \"", "\"chunked.\"", ")", "return", "None", "elif", "length", "is", "not", "None", ":", "try", ":", "# RFC 7230 section 3.3.2 specifies multiple content lengths can", "# be sent in a single Content-Length header", "# (e.g. Content-Length: 42, 42). This line ensures the values", "# are all valid ints and that as long as the `set` length is 1,", "# all values are the same. Otherwise, the header is invalid.", "lengths", "=", "set", "(", "[", "int", "(", "val", ")", "for", "val", "in", "length", ".", "split", "(", "','", ")", "]", ")", "if", "len", "(", "lengths", ")", ">", "1", ":", "raise", "InvalidHeader", "(", "\"Content-Length contained multiple \"", "\"unmatching values (%s)\"", "%", "length", ")", "length", "=", "lengths", ".", "pop", "(", ")", "except", "ValueError", ":", "length", "=", "None", "else", ":", "if", "length", "<", "0", ":", "length", "=", "None", "# Convert status to int for comparison", "# In some cases, httplib returns a status of \"_UNKNOWN\"", "try", ":", "status", "=", "int", "(", "self", ".", "status", ")", "except", "ValueError", ":", "status", "=", "0", "# Check for responses that shouldn't include a body", "if", "status", "in", "(", "204", ",", "304", ")", "or", "100", "<=", "status", "<", "200", "or", "request_method", "==", "'HEAD'", ":", "length", "=", "0", "return", "length" ]
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/requests/packages/urllib3/response.py#L198-L244
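The set-based check above collapses a comma-separated Content-Length header into its distinct values and accepts only a single agreeing value, per RFC 7230 sec 3.3.2. A self-contained sketch of just that validation step; the helper name is mine, not a urllib3 API:

# Standalone sketch of the RFC 7230 multiple-Content-Length check above;
# parse_content_length is a hypothetical helper, not part of urllib3.
def parse_content_length(header_value):
    try:
        lengths = {int(val) for val in header_value.split(',')}
    except ValueError:
        return None  # non-numeric values: treat the header as absent
    if len(lengths) > 1:
        raise ValueError("Content-Length contained multiple "
                         "unmatching values (%s)" % header_value)
    length = lengths.pop()
    return None if length < 0 else length

print(parse_content_length("42, 42"))  # 42 (duplicates that agree are fine)
print(parse_content_length("abc"))     # None
# parse_content_length("42, 7") raises ValueError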
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_adm_ca_server_cert.py
python
OpenShiftCLIConfig.config_options
(self)
return self._options
return config options
return config options
[ "return", "config", "options" ]
def config_options(self):
    ''' return config options '''
    return self._options
[ "def", "config_options", "(", "self", ")", ":", "return", "self", ".", "_options" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_adm_ca_server_cert.py#L1459-L1461
UniShared/videonotes
803cdd97b90823fb17f50dd55999aa7d1fec6c3a
lib/oauth2/__init__.py
python
Request.to_postdata
(self)
return urllib.urlencode(d, True).replace('+', '%20')
Serialize as post data for a POST request.
Serialize as post data for a POST request.
[ "Serialize", "as", "post", "data", "for", "a", "POST", "request", "." ]
def to_postdata(self):
    """Serialize as post data for a POST request."""
    d = {}
    for k, v in self.iteritems():
        d[k.encode('utf-8')] = to_utf8_optional_iterator(v)

    # tell urlencode to deal with sequence values and map them correctly
    # to resulting querystring. for example self["k"] = ["v1", "v2"] will
    # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
    return urllib.urlencode(d, True).replace('+', '%20')
[ "def", "to_postdata", "(", "self", ")", ":", "d", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "iteritems", "(", ")", ":", "d", "[", "k", ".", "encode", "(", "'utf-8'", ")", "]", "=", "to_utf8_optional_iterator", "(", "v", ")", "# tell urlencode to deal with sequence values and map them correctly", "# to resulting querystring. for example self[\"k\"] = [\"v1\", \"v2\"] will", "# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D", "return", "urllib", ".", "urlencode", "(", "d", ",", "True", ")", ".", "replace", "(", "'+'", ",", "'%20'", ")" ]
https://github.com/UniShared/videonotes/blob/803cdd97b90823fb17f50dd55999aa7d1fec6c3a/lib/oauth2/__init__.py#L402-L411
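The True passed to urlencode above is the doseq flag the comment describes; in Python 3 the same call lives at urllib.parse.urlencode. A quick sketch of that behavior, written in Python 3 rather than the Python 2 of the source:

# Python 3 illustration of the doseq behavior relied on above
# (urllib.urlencode(d, True) in Python 2 == urlencode(d, doseq=True) here).
from urllib.parse import urlencode

d = {"k": ["v1", "v2"], "name": "a b"}
print(urlencode(d, doseq=True))                      # k=v1&k=v2&name=a+b
print(urlencode(d, doseq=True).replace('+', '%20'))  # k=v1&k=v2&name=a%20b
print(urlencode({"k": ["v1", "v2"]}))                # k=%5B%27v1%27%2C+%27v2%27%5D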
smart-mobile-software/gitstack
d9fee8f414f202143eb6e620529e8e5539a2af56
python/Lib/site-packages/django/contrib/gis/db/models/sql/compiler.py
python
GeoSQLCompiler.get_default_columns
(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False, local_only=False)
return result, aliases
Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). This routine is overridden from Query to handle customized selection of geometry columns.
Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal.
[ "Computes", "the", "default", "columns", "for", "selecting", "every", "field", "in", "the", "base", "model", ".", "Will", "sometimes", "be", "called", "to", "pull", "in", "related", "models", "(", "e", ".", "g", ".", "via", "select_related", ")", "in", "which", "case", "opts", "and", "start_alias", "will", "be", "given", "to", "provide", "a", "starting", "point", "for", "the", "traversal", "." ]
def get_default_columns(self, with_aliases=False, col_aliases=None,
                        start_alias=None, opts=None, as_pairs=False,
                        local_only=False):
    """
    Computes the default columns for selecting every field in the base
    model. Will sometimes be called to pull in related models (e.g. via
    select_related), in which case "opts" and "start_alias" will be given
    to provide a starting point for the traversal.

    Returns a list of strings, quoted appropriately for use in SQL
    directly, as well as a set of aliases used in the select statement (if
    'as_pairs' is True, returns a list of (alias, col_name) pairs instead
    of strings as the first component and None as the second component).

    This routine is overridden from Query to handle customized selection of
    geometry columns.
    """
    result = []
    if opts is None:
        opts = self.query.model._meta
    aliases = set()
    only_load = self.deferred_to_columns()
    # Skip all proxy to the root proxied model
    proxied_model = opts.concrete_model

    if start_alias:
        seen = {None: start_alias}
    for field, model in opts.get_fields_with_model():
        if local_only and model is not None:
            continue
        if start_alias:
            try:
                alias = seen[model]
            except KeyError:
                if model is proxied_model:
                    alias = start_alias
                else:
                    link_field = opts.get_ancestor_link(model)
                    alias = self.query.join((start_alias, model._meta.db_table,
                                             link_field.column, model._meta.pk.column))
                seen[model] = alias
        else:
            # If we're starting from the base model of the queryset, the
            # aliases will have already been set up in pre_sql_setup(), so
            # we can save time here.
            alias = self.query.included_inherited_models[model]
        table = self.query.alias_map[alias][TABLE_NAME]
        if table in only_load and field.column not in only_load[table]:
            continue
        if as_pairs:
            result.append((alias, field.column))
            aliases.add(alias)
            continue
        # This part of the function is customized for GeoQuery. We
        # see if there was any custom selection specified in the
        # dictionary, and set up the selection format appropriately.
        field_sel = self.get_field_select(field, alias)
        if with_aliases and field.column in col_aliases:
            c_alias = 'Col%d' % len(col_aliases)
            result.append('%s AS %s' % (field_sel, c_alias))
            col_aliases.add(c_alias)
            aliases.add(c_alias)
        else:
            r = field_sel
            result.append(r)
            aliases.add(r)
            if with_aliases:
                col_aliases.add(field.column)
    return result, aliases
[ "def", "get_default_columns", "(", "self", ",", "with_aliases", "=", "False", ",", "col_aliases", "=", "None", ",", "start_alias", "=", "None", ",", "opts", "=", "None", ",", "as_pairs", "=", "False", ",", "local_only", "=", "False", ")", ":", "result", "=", "[", "]", "if", "opts", "is", "None", ":", "opts", "=", "self", ".", "query", ".", "model", ".", "_meta", "aliases", "=", "set", "(", ")", "only_load", "=", "self", ".", "deferred_to_columns", "(", ")", "# Skip all proxy to the root proxied model", "proxied_model", "=", "opts", ".", "concrete_model", "if", "start_alias", ":", "seen", "=", "{", "None", ":", "start_alias", "}", "for", "field", ",", "model", "in", "opts", ".", "get_fields_with_model", "(", ")", ":", "if", "local_only", "and", "model", "is", "not", "None", ":", "continue", "if", "start_alias", ":", "try", ":", "alias", "=", "seen", "[", "model", "]", "except", "KeyError", ":", "if", "model", "is", "proxied_model", ":", "alias", "=", "start_alias", "else", ":", "link_field", "=", "opts", ".", "get_ancestor_link", "(", "model", ")", "alias", "=", "self", ".", "query", ".", "join", "(", "(", "start_alias", ",", "model", ".", "_meta", ".", "db_table", ",", "link_field", ".", "column", ",", "model", ".", "_meta", ".", "pk", ".", "column", ")", ")", "seen", "[", "model", "]", "=", "alias", "else", ":", "# If we're starting from the base model of the queryset, the", "# aliases will have already been set up in pre_sql_setup(), so", "# we can save time here.", "alias", "=", "self", ".", "query", ".", "included_inherited_models", "[", "model", "]", "table", "=", "self", ".", "query", ".", "alias_map", "[", "alias", "]", "[", "TABLE_NAME", "]", "if", "table", "in", "only_load", "and", "field", ".", "column", "not", "in", "only_load", "[", "table", "]", ":", "continue", "if", "as_pairs", ":", "result", ".", "append", "(", "(", "alias", ",", "field", ".", "column", ")", ")", "aliases", ".", "add", "(", "alias", ")", "continue", "# This part of the function is customized for GeoQuery. We", "# see if there was any custom selection specified in the", "# dictionary, and set up the selection format appropriately.", "field_sel", "=", "self", ".", "get_field_select", "(", "field", ",", "alias", ")", "if", "with_aliases", "and", "field", ".", "column", "in", "col_aliases", ":", "c_alias", "=", "'Col%d'", "%", "len", "(", "col_aliases", ")", "result", ".", "append", "(", "'%s AS %s'", "%", "(", "field_sel", ",", "c_alias", ")", ")", "col_aliases", ".", "add", "(", "c_alias", ")", "aliases", ".", "add", "(", "c_alias", ")", "else", ":", "r", "=", "field_sel", "result", ".", "append", "(", "r", ")", "aliases", ".", "add", "(", "r", ")", "if", "with_aliases", ":", "col_aliases", ".", "add", "(", "field", ".", "column", ")", "return", "result", ",", "aliases" ]
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/site-packages/django/contrib/gis/db/models/sql/compiler.py#L96-L163
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/transforms.py
python
CompositeGenericTransform.__init__
(self, a, b, **kwargs)
Create a new composite transform that is the result of applying transform *a* then transform *b*. You will generally not call this constructor directly but use the :func:`composite_transform_factory` function instead, which can automatically choose the best kind of composite transform instance to create.
Create a new composite transform that is the result of applying transform *a* then transform *b*.
[ "Create", "a", "new", "composite", "transform", "that", "is", "the", "result", "of", "applying", "transform", "*", "a", "*", "then", "transform", "*", "b", "*", "." ]
def __init__(self, a, b, **kwargs):
    """
    Create a new composite transform that is the result of
    applying transform *a* then transform *b*.

    You will generally not call this constructor directly but use the
    :func:`composite_transform_factory` function instead, which can
    automatically choose the best kind of composite transform instance
    to create.
    """
    assert a.output_dims == b.input_dims
    self.input_dims = a.input_dims
    self.output_dims = b.output_dims

    Transform.__init__(self, **kwargs)
    self._a = a
    self._b = b
    self.set_children(a, b)
[ "def", "__init__", "(", "self", ",", "a", ",", "b", ",", "*", "*", "kwargs", ")", ":", "assert", "a", ".", "output_dims", "==", "b", ".", "input_dims", "self", ".", "input_dims", "=", "a", ".", "input_dims", "self", ".", "output_dims", "=", "b", ".", "output_dims", "Transform", ".", "__init__", "(", "self", ",", "*", "*", "kwargs", ")", "self", ".", "_a", "=", "a", "self", ".", "_b", "=", "b", "self", ".", "set_children", "(", "a", ",", "b", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/transforms.py#L2135-L2152
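Editor's note: a minimal usage sketch for the record above. It composes two affine transforms through composite_transform_factory, the entry point the docstring recommends over calling the constructor directly; everything here is standard matplotlib API.

# Sketch: apply transform a, then transform b.
from matplotlib.transforms import Affine2D, composite_transform_factory

a = Affine2D().scale(2.0)            # applied first
b = Affine2D().translate(1.0, 0.0)   # applied second
composite = composite_transform_factory(a, b)
print(composite.transform([[1.0, 1.0]]))  # [[3. 2.]]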
pythonql/pythonql
10ded9473eee8dc75630c2c67f06b6f86a0af305
pythonql/parser/PythonQLParser.py
python
Parser.p_let_clause_entry
(self, p)
let_clause_entry : expr_list '=' test
let_clause_entry : expr_list '=' test
[ "let_clause_entry", ":", "expr_list", "=", "test" ]
def p_let_clause_entry(self, p): """let_clause_entry : expr_list '=' test""" p[0] = make_node('let_clause_entry', p)
[ "def", "p_let_clause_entry", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "make_node", "(", "'let_clause_entry'", ",", "p", ")" ]
https://github.com/pythonql/pythonql/blob/10ded9473eee8dc75630c2c67f06b6f86a0af305/pythonql/parser/PythonQLParser.py#L714-L716
dmlc/dgl
8d14a739bc9e446d6c92ef83eafe5782398118de
python/dgl/_deprecate/graph.py
python
DGLBaseGraph.edge_id
(self, u, v, force_multi=None, return_array=False)
Return the edge ID, or an array of edge IDs, between source node `u` and destination node `v`. Parameters ---------- u : int The source node ID. v : int The destination node ID. force_multi : bool Deprecated (Will be deleted in the future). If False, will return a single edge ID. If True, will always return an array. return_array : bool If False, will return a single edge ID. If True, will always return an array. Returns ------- int or tensor The edge ID if return_array is False. The edge ID array otherwise. Notes ----- If multiple edges exist between `u` and `v` and return_array is False, the result is undefined. Examples -------- The following example uses the PyTorch backend. For simple graphs: >>> G = dgl.DGLGraph() >>> G.add_nodes(3) >>> G.add_edges([0, 0], [1, 2]) # (0, 1), (0, 2) >>> G.edge_id(0, 2) 1 >>> G.edge_id(0, 1) 0 For multigraphs: >>> G = dgl.DGLGraph() >>> G.add_nodes(3) Adding edges (0, 1), (0, 2), (0, 1), (0, 2), so edge IDs 0 and 2 both connect node 0 to node 1, while edge IDs 1 and 3 both connect node 0 to node 2. >>> G.add_edges([0, 0, 0, 0], [1, 2, 1, 2]) >>> G.edge_id(0, 1, return_array=True) tensor([0, 2]) See Also -------- edge_ids

Return the edge ID, or an array of edge IDs, between source node `u` and destination node `v`.
[ "Return", "the", "edge", "ID", "or", "an", "array", "of", "edge", "IDs", "between", "source", "node", "u", "and", "destination", "node", "v", "." ]
def edge_id(self, u, v, force_multi=None, return_array=False):
    """Return the edge ID, or an array of edge IDs, between source node `u`
    and destination node `v`.

    Parameters
    ----------
    u : int
        The source node ID.
    v : int
        The destination node ID.
    force_multi : bool
        Deprecated (Will be deleted in the future). If False, will return
        a single edge ID. If True, will always return an array.
    return_array : bool
        If False, will return a single edge ID. If True, will always
        return an array.

    Returns
    -------
    int or tensor
        The edge ID if return_array is False.
        The edge ID array otherwise.

    Notes
    -----
    If multiple edges exist between `u` and `v` and return_array is False,
    the result is undefined.

    Examples
    --------
    The following example uses the PyTorch backend.

    For simple graphs:

    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0], [1, 2])  # (0, 1), (0, 2)
    >>> G.edge_id(0, 2)
    1
    >>> G.edge_id(0, 1)
    0

    For multigraphs:

    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)

    Adding edges (0, 1), (0, 2), (0, 1), (0, 2), so edge IDs 0 and 2 both
    connect node 0 to node 1, while edge IDs 1 and 3 both connect node 0
    to node 2.

    >>> G.add_edges([0, 0, 0, 0], [1, 2, 1, 2])
    >>> G.edge_id(0, 1, return_array=True)
    tensor([0, 2])

    See Also
    --------
    edge_ids
    """
    idx = self._graph.edge_id(u, v)
    if force_multi is not None:
        dgl_warning("force_multi will be deprecated." \
                    "Please use return_array instead")
        return_array = force_multi

    if return_array:
        return idx.tousertensor()
    else:
        assert len(idx) == 1, "For return_array=False, there should be one and " \
            "only one edge between u and v, but get {} edges. " \
            "Please use return_array=True instead".format(len(idx))
        return idx[0]
[ "def", "edge_id", "(", "self", ",", "u", ",", "v", ",", "force_multi", "=", "None", ",", "return_array", "=", "False", ")", ":", "idx", "=", "self", ".", "_graph", ".", "edge_id", "(", "u", ",", "v", ")", "if", "force_multi", "is", "not", "None", ":", "dgl_warning", "(", "\"force_multi will be deprecated.\"", "\"Please use return_array instead\"", ")", "return_array", "=", "force_multi", "if", "return_array", ":", "return", "idx", ".", "tousertensor", "(", ")", "else", ":", "assert", "len", "(", "idx", ")", "==", "1", ",", "\"For return_array=False, there should be one and \"", "\"only one edge between u and v, but get {} edges. \"", "\"Please use return_array=True instead\"", ".", "format", "(", "len", "(", "idx", ")", ")", "return", "idx", "[", "0", "]" ]
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/python/dgl/_deprecate/graph.py#L322-L393
bugy/script-server
9a57ce15903c81bcb537b872f1330ee55ba31563
tools/build.py
python
BuildInfo.get_files
(self)
return self.files
[]
def get_files(self): return self.files
[ "def", "get_files", "(", "self", ")", ":", "return", "self", ".", "files" ]
https://github.com/bugy/script-server/blob/9a57ce15903c81bcb537b872f1330ee55ba31563/tools/build.py#L47-L48
MeanEYE/Sunflower
1024bbdde3b8e202ddad3553b321a7b6230bffc9
sunflower/plugin_base/plugin.py
python
PluginBase._drag_drop
(self, widget, drag_context, x, y, timestamp)
return True
Handle dropping data over widget
Handle dropping data over widget
[ "Handle", "dropping", "data", "over", "widget" ]
def _drag_drop(self, widget, drag_context, x, y, timestamp): """Handle dropping data over widget""" return True
[ "def", "_drag_drop", "(", "self", ",", "widget", ",", "drag_context", ",", "x", ",", "y", ",", "timestamp", ")", ":", "return", "True" ]
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/plugin_base/plugin.py#L161-L163
smart-mobile-software/gitstack
d9fee8f414f202143eb6e620529e8e5539a2af56
python/Lib/idlelib/configDialog.py
python
ConfigDialog.SaveNewTheme
(self,themeName,theme)
save a newly created theme. themeName - string, the name of the new theme theme - dictionary containing the new theme
save a newly created theme. themeName - string, the name of the new theme theme - dictionary containing the new theme
[ "save", "a", "newly", "created", "theme", ".", "themeName", "-", "string", "the", "name", "of", "the", "new", "theme", "theme", "-", "dictionary", "containing", "the", "new", "theme" ]
def SaveNewTheme(self,themeName,theme): """ save a newly created theme. themeName - string, the name of the new theme theme - dictionary containing the new theme """ if not idleConf.userCfg['highlight'].has_section(themeName): idleConf.userCfg['highlight'].add_section(themeName) for element in theme.keys(): value=theme[element] idleConf.userCfg['highlight'].SetOption(themeName,element,value)
[ "def", "SaveNewTheme", "(", "self", ",", "themeName", ",", "theme", ")", ":", "if", "not", "idleConf", ".", "userCfg", "[", "'highlight'", "]", ".", "has_section", "(", "themeName", ")", ":", "idleConf", ".", "userCfg", "[", "'highlight'", "]", ".", "add_section", "(", "themeName", ")", "for", "element", "in", "theme", ".", "keys", "(", ")", ":", "value", "=", "theme", "[", "element", "]", "idleConf", ".", "userCfg", "[", "'highlight'", "]", ".", "SetOption", "(", "themeName", ",", "element", ",", "value", ")" ]
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/idlelib/configDialog.py#L1075-L1085
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
pytorch/pytorchcv/models/sparsenet.py
python
sparsenet201
(**kwargs)
return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)
SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters.
SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
[ "SparseNet", "-", "201", "model", "from", "Sparsely", "Aggregated", "Convolutional", "Networks", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1801", ".", "05895", "." ]
def sparsenet201(**kwargs): """ SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)
[ "def", "sparsenet201", "(", "*", "*", "kwargs", ")", ":", "return", "get_sparsenet", "(", "num_layers", "=", "201", ",", "model_name", "=", "\"sparsenet201\"", ",", "*", "*", "kwargs", ")" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/pytorch/pytorchcv/models/sparsenet.py#L309-L320
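Editor's note: a minimal sketch of using the factory above. It assumes the published pytorchcv package layout (get_model in pytorchcv.model_provider); the dummy input shape and ImageNet-1k output size follow the repo's usual conventions.

import torch
from pytorchcv.model_provider import get_model

net = get_model("sparsenet201", pretrained=False)
net.eval()
x = torch.randn(1, 3, 224, 224)       # dummy ImageNet-sized batch
with torch.no_grad():
    y = net(x)
print(y.shape)                        # expected: torch.Size([1, 1000])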
openbmc/openbmc
5f4109adae05f4d6925bfe960007d52f98c61086
poky/bitbake/lib/toaster/orm/models.py
python
Project.get_available_recipes
(self)
return queryset
Returns QuerySet of all the recipes that are provided by layers added to this project
Returns QuerySet of all the recipes that are provided by layers added to this project
[ "Returns", "QuerySet", "of", "all", "the", "recipes", "that", "are", "provided", "by", "layers", "added", "to", "this", "project" ]
def get_available_recipes(self): """ Returns QuerySet of all the recipes that are provided by layers added to this project """ queryset = Recipe.objects.filter( layer_version__in=self.get_project_layer_versions()) return queryset
[ "def", "get_available_recipes", "(", "self", ")", ":", "queryset", "=", "Recipe", ".", "objects", ".", "filter", "(", "layer_version__in", "=", "self", ".", "get_project_layer_versions", "(", ")", ")", "return", "queryset" ]
https://github.com/openbmc/openbmc/blob/5f4109adae05f4d6925bfe960007d52f98c61086/poky/bitbake/lib/toaster/orm/models.py#L347-L353
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/argparse.py
python
ArgumentParser._add_action
(self, action)
return action
[]
def _add_action(self, action): if action.option_strings: self._optionals._add_action(action) else: self._positionals._add_action(action) return action
[ "def", "_add_action", "(", "self", ",", "action", ")", ":", "if", "action", ".", "option_strings", ":", "self", ".", "_optionals", ".", "_add_action", "(", "action", ")", "else", ":", "self", ".", "_positionals", ".", "_add_action", "(", "action", ")", "return", "action" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/argparse.py#L1680-L1685
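Editor's note: a short sketch of the routing _add_action() performs. add_argument() ultimately passes each action through this method, which files it under the optionals or positionals group depending on whether it has option strings; this uses only the public argparse API.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true")  # option strings -> _optionals
parser.add_argument("path")                            # no option strings -> _positionals
args = parser.parse_args(["--verbose", "/tmp"])
print(args.verbose, args.path)  # True /tmp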
sideeffects/SideFXLabs
956bc1eef6710882ae8d3a31b4a33dd631a56d5f
scripts/python/hou_settings.py
python
Settings._save
(self)
[]
def _save(self): with open(self.filename, 'w') as fp: json.dump(self._settings_dic, fp)
[ "def", "_save", "(", "self", ")", ":", "with", "open", "(", "self", ".", "filename", ",", "'w'", ")", "as", "fp", ":", "json", ".", "dump", "(", "self", ".", "_settings_dic", ",", "fp", ")" ]
https://github.com/sideeffects/SideFXLabs/blob/956bc1eef6710882ae8d3a31b4a33dd631a56d5f/scripts/python/hou_settings.py#L57-L59
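Editor's note: a minimal sketch of the write _save() performs. The filename and keys here are illustrative, not taken from the module; the only requirement is that the settings dict stays JSON-serializable.

import json

settings = {"autosave": True, "recent_files": ["a.hip", "b.hip"]}  # illustrative
with open("settings.json", "w") as fp:
    json.dump(settings, fp)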
facebookresearch/hydra
9b2f4d54b328d1551aa70a241a1d638cbe046367
examples/patterns/multi-select/my_app.py
python
my_app
(cfg: DictConfig)
[]
def my_app(cfg: DictConfig) -> None: print(OmegaConf.to_yaml(cfg))
[ "def", "my_app", "(", "cfg", ":", "DictConfig", ")", "->", "None", ":", "print", "(", "OmegaConf", ".", "to_yaml", "(", "cfg", ")", ")" ]
https://github.com/facebookresearch/hydra/blob/9b2f4d54b328d1551aa70a241a1d638cbe046367/examples/patterns/multi-select/my_app.py#L8-L9
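Editor's note: a sketch of how my_app is typically wired up in a Hydra example. The conf/config.yaml path and name are assumptions, not taken from this record; newer hydra-core versions also accept a version_base argument, omitted here.

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_path="conf", config_name="config")  # assumed config layout
def my_app(cfg: DictConfig) -> None:
    print(OmegaConf.to_yaml(cfg))

if __name__ == "__main__":
    my_app()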
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/pymongo/master_slave_connection.py
python
MasterSlaveConnection._send_message_with_response
(self, message, _connection_to_use=None, _must_use_master=False, **kwargs)
Send a message to Mongo and receive the response. Sends the given message and returns a (connection_id, response) pair. :Parameters: - `message`: the message to send
Send a message to Mongo and receive the response.
[ "Send", "a", "message", "to", "Mongo", "and", "receive", "the", "response", "." ]
def _send_message_with_response(self, message, _connection_to_use=None,
                                _must_use_master=False, **kwargs):
    """Send a message to Mongo and receive the response.

    Sends the given message and returns a (connection_id, response) pair.

    :Parameters:
      - `message`: the message to send
    """
    if _connection_to_use is not None:
        if _connection_to_use == -1:
            return (-1, self.__master._send_message_with_response(message,
                                                                  **kwargs))
        else:
            return (_connection_to_use,
                    self.__slaves[_connection_to_use]
                    ._send_message_with_response(message, **kwargs))

    # _must_use_master is set for commands, which must be sent to the
    # master instance. any queries in a request must be sent to the
    # master since that is where writes go.
    if _must_use_master or self.in_request():
        return (-1, self.__master._send_message_with_response(message,
                                                              **kwargs))

    # Iterate through the slaves randomly until we have success. Raise
    # reconnect if they all fail.
    for connection_id in helpers.shuffled(xrange(len(self.__slaves))):
        try:
            slave = self.__slaves[connection_id]
            return (connection_id,
                    slave._send_message_with_response(message, **kwargs))
        except AutoReconnect:
            pass

    raise AutoReconnect("failed to connect to slaves")
[ "def", "_send_message_with_response", "(", "self", ",", "message", ",", "_connection_to_use", "=", "None", ",", "_must_use_master", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "_connection_to_use", "is", "not", "None", ":", "if", "_connection_to_use", "==", "-", "1", ":", "return", "(", "-", "1", ",", "self", ".", "__master", ".", "_send_message_with_response", "(", "message", ",", "*", "*", "kwargs", ")", ")", "else", ":", "return", "(", "_connection_to_use", ",", "self", ".", "__slaves", "[", "_connection_to_use", "]", ".", "_send_message_with_response", "(", "message", ",", "*", "*", "kwargs", ")", ")", "# _must_use_master is set for commands, which must be sent to the", "# master instance. any queries in a request must be sent to the", "# master since that is where writes go.", "if", "_must_use_master", "or", "self", ".", "in_request", "(", ")", ":", "return", "(", "-", "1", ",", "self", ".", "__master", ".", "_send_message_with_response", "(", "message", ",", "*", "*", "kwargs", ")", ")", "# Iterate through the slaves randomly until we have success. Raise", "# reconnect if they all fail.", "for", "connection_id", "in", "helpers", ".", "shuffled", "(", "xrange", "(", "len", "(", "self", ".", "__slaves", ")", ")", ")", ":", "try", ":", "slave", "=", "self", ".", "__slaves", "[", "connection_id", "]", "return", "(", "connection_id", ",", "slave", ".", "_send_message_with_response", "(", "message", ",", "*", "*", "kwargs", ")", ")", "except", "AutoReconnect", ":", "pass", "raise", "AutoReconnect", "(", "\"failed to connect to slaves\"", ")" ]
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/pymongo/master_slave_connection.py#L169-L206
ReactionMechanismGenerator/RMG-Py
2b7baf51febf27157def58fb3f6cee03fb6a684c
rmgpy/tools/globaluncertainty.py
python
ReactorPCEFactory.analyze_results
(self, log=True)
return mean, var, cov, main_sens, total_sens
Obtain the results: the prediction mean and variance, as well as the global sensitivity indices. Returns a tuple containing the following statistics: (mean species mole fractions, variance, covariance, main sensitivity indices, total sensitivity indices)
Obtain the results: the prediction mean and variance, as well as the global sensitivity indices. Returns a tuple containing the following statistics: (mean species mole fractions, variance, covariance, main sensitivity indices, total sensitivity indices)
[ "Obtain", "the", "results", ":", "the", "prediction", "mean", "and", "variance", "as", "well", "as", "the", "global", "sensitivity", "indices", ".", "Returns", "a", "tuple", "containing", "the", "following", "statistics", ":", "(", "mean", "species", "mole", "fractions", "variance", "covariance", "main", "sensitivity", "indices", "total", "sensitivity", "indices", ")" ]
def analyze_results(self, log=True): """ Obtain the results: the prediction mean and variance, as well as the global sensitivity indices Returns a tuple containing the following statistics (mean species mole fractions, variance, covariance, main sensitivity indices, total sensitivity indices) """ # Compute the mean and variance for each of the uncertain parameters mean = np.array(self.pce.Mean()) var = np.array(self.pce.Variance()) stddev = np.sqrt(var) stddev_percent = stddev / mean * 100.0 cov = self.pce.Covariance() # Extract the global sensitivity indices main_sens = np.array(self.pce.MainSensitivity()) total_sens = np.array(self.pce.TotalSensitivity()) output = '' for i in range(self.reactor_mod.num_conditions): output += """============================================================ Condition {0} ------------------------------------------------------------ {1!s} ============================================================ Condition {0} {2}Mole Fractions ------------------------------------------------------------ Species Mean Stddev Stddev (%) ------------------------------------------------------------ """.format(i + 1, self.reactor_mod.cantera.conditions[i], 'Log ' if self.logx else '') for j, output_species in enumerate(self.reactor_mod.output_species_list): output_index = i * self.reactor_mod.num_output_species + j output += '{0:<15}{1:>15.3e}{2:>15.3e}{3:>15.3f}\n'.format(output_species.to_chemkin(), mean[output_index], stddev[output_index], stddev_percent[output_index]) output += '============================================================\n\n' if self.reactor_mod.k_params: output += """==================================================================================================== Condition {0} Reaction Rate Sensitivity Indices ---------------------------------------------------------------------------------------------------- Description sens_main sens_total """.format(i + 1) for j, output_species in enumerate(self.reactor_mod.output_species_list): output += '----------------------------------------------------------------------------------------------------\n' output_index = i * self.reactor_mod.num_output_species + j for k, descriptor in enumerate(self.reactor_mod.k_params): parameter_index = k if not self.reactor_mod.correlated: description = 'dln[{0}]/dln[{1}]'.format(output_species.to_chemkin(), self.reactor_mod.cantera.reaction_list[descriptor].to_chemkin(kinetics=False)) else: description = 'dln[{0}]/dln[{1}]'.format(output_species.to_chemkin(), descriptor) output += '{0:<70}{1:>14.3f}%{2:>14.3f}%\n'.format(description, 100 * main_sens[output_index][parameter_index], 100 * total_sens[output_index][parameter_index]) output += '====================================================================================================\n\n' if self.reactor_mod.g_params: output += """==================================================================================================== Condition {0} Thermochemistry Sensitivity Indices ---------------------------------------------------------------------------------------------------- Description sens_main sens_total """.format(i + 1) for j, output_species in enumerate(self.reactor_mod.output_species_list): output += '----------------------------------------------------------------------------------------------------\n' output_index = i * self.reactor_mod.num_output_species + j for g, descriptor in enumerate(self.reactor_mod.g_params): parameter_index = len(self.reactor_mod.k_params) + g if not self.reactor_mod.correlated: 
description = 'dln[{0}]/dG[{1}]'.format(output_species.to_chemkin(), self.reactor_mod.cantera.species_list[descriptor].to_chemkin()) else: description = 'dln[{0}]/dG[{1}]'.format(output_species.to_chemkin(), descriptor) output += '{0:<70}{1:>14.3f}%{2:>14.3f}%\n'.format(description, 100 * main_sens[output_index][parameter_index], 100 * total_sens[output_index][parameter_index]) output += '====================================================================================================\n\n' if log: logging.info(output) else: print(output) return mean, var, cov, main_sens, total_sens
[ "def", "analyze_results", "(", "self", ",", "log", "=", "True", ")", ":", "# Compute the mean and variance for each of the uncertain parameters", "mean", "=", "np", ".", "array", "(", "self", ".", "pce", ".", "Mean", "(", ")", ")", "var", "=", "np", ".", "array", "(", "self", ".", "pce", ".", "Variance", "(", ")", ")", "stddev", "=", "np", ".", "sqrt", "(", "var", ")", "stddev_percent", "=", "stddev", "/", "mean", "*", "100.0", "cov", "=", "self", ".", "pce", ".", "Covariance", "(", ")", "# Extract the global sensitivity indices", "main_sens", "=", "np", ".", "array", "(", "self", ".", "pce", ".", "MainSensitivity", "(", ")", ")", "total_sens", "=", "np", ".", "array", "(", "self", ".", "pce", ".", "TotalSensitivity", "(", ")", ")", "output", "=", "''", "for", "i", "in", "range", "(", "self", ".", "reactor_mod", ".", "num_conditions", ")", ":", "output", "+=", "\"\"\"============================================================\nCondition {0}\n------------------------------------------------------------\n{1!s}\n============================================================\nCondition {0} {2}Mole Fractions\n------------------------------------------------------------\nSpecies Mean Stddev Stddev (%)\n------------------------------------------------------------\n\"\"\"", ".", "format", "(", "i", "+", "1", ",", "self", ".", "reactor_mod", ".", "cantera", ".", "conditions", "[", "i", "]", ",", "'Log '", "if", "self", ".", "logx", "else", "''", ")", "for", "j", ",", "output_species", "in", "enumerate", "(", "self", ".", "reactor_mod", ".", "output_species_list", ")", ":", "output_index", "=", "i", "*", "self", ".", "reactor_mod", ".", "num_output_species", "+", "j", "output", "+=", "'{0:<15}{1:>15.3e}{2:>15.3e}{3:>15.3f}\\n'", ".", "format", "(", "output_species", ".", "to_chemkin", "(", ")", ",", "mean", "[", "output_index", "]", ",", "stddev", "[", "output_index", "]", ",", "stddev_percent", "[", "output_index", "]", ")", "output", "+=", "'============================================================\\n\\n'", "if", "self", ".", "reactor_mod", ".", "k_params", ":", "output", "+=", "\"\"\"====================================================================================================\nCondition {0} Reaction Rate Sensitivity Indices\n----------------------------------------------------------------------------------------------------\nDescription sens_main sens_total\n\"\"\"", ".", "format", "(", "i", "+", "1", ")", "for", "j", ",", "output_species", "in", "enumerate", "(", "self", ".", "reactor_mod", ".", "output_species_list", ")", ":", "output", "+=", "'----------------------------------------------------------------------------------------------------\\n'", "output_index", "=", "i", "*", "self", ".", "reactor_mod", ".", "num_output_species", "+", "j", "for", "k", ",", "descriptor", "in", "enumerate", "(", "self", ".", "reactor_mod", ".", "k_params", ")", ":", "parameter_index", "=", "k", "if", "not", "self", ".", "reactor_mod", ".", "correlated", ":", "description", "=", "'dln[{0}]/dln[{1}]'", ".", "format", "(", "output_species", ".", "to_chemkin", "(", ")", ",", "self", ".", "reactor_mod", ".", "cantera", ".", "reaction_list", "[", "descriptor", "]", ".", "to_chemkin", "(", "kinetics", "=", "False", ")", ")", "else", ":", "description", "=", "'dln[{0}]/dln[{1}]'", ".", "format", "(", "output_species", ".", "to_chemkin", "(", ")", ",", "descriptor", ")", "output", "+=", "'{0:<70}{1:>14.3f}%{2:>14.3f}%\\n'", ".", "format", "(", "description", ",", "100", "*", "main_sens", "[", 
"output_index", "]", "[", "parameter_index", "]", ",", "100", "*", "total_sens", "[", "output_index", "]", "[", "parameter_index", "]", ")", "output", "+=", "'====================================================================================================\\n\\n'", "if", "self", ".", "reactor_mod", ".", "g_params", ":", "output", "+=", "\"\"\"====================================================================================================\nCondition {0} Thermochemistry Sensitivity Indices\n----------------------------------------------------------------------------------------------------\nDescription sens_main sens_total\n\"\"\"", ".", "format", "(", "i", "+", "1", ")", "for", "j", ",", "output_species", "in", "enumerate", "(", "self", ".", "reactor_mod", ".", "output_species_list", ")", ":", "output", "+=", "'----------------------------------------------------------------------------------------------------\\n'", "output_index", "=", "i", "*", "self", ".", "reactor_mod", ".", "num_output_species", "+", "j", "for", "g", ",", "descriptor", "in", "enumerate", "(", "self", ".", "reactor_mod", ".", "g_params", ")", ":", "parameter_index", "=", "len", "(", "self", ".", "reactor_mod", ".", "k_params", ")", "+", "g", "if", "not", "self", ".", "reactor_mod", ".", "correlated", ":", "description", "=", "'dln[{0}]/dG[{1}]'", ".", "format", "(", "output_species", ".", "to_chemkin", "(", ")", ",", "self", ".", "reactor_mod", ".", "cantera", ".", "species_list", "[", "descriptor", "]", ".", "to_chemkin", "(", ")", ")", "else", ":", "description", "=", "'dln[{0}]/dG[{1}]'", ".", "format", "(", "output_species", ".", "to_chemkin", "(", ")", ",", "descriptor", ")", "output", "+=", "'{0:<70}{1:>14.3f}%{2:>14.3f}%\\n'", ".", "format", "(", "description", ",", "100", "*", "main_sens", "[", "output_index", "]", "[", "parameter_index", "]", ",", "100", "*", "total_sens", "[", "output_index", "]", "[", "parameter_index", "]", ")", "output", "+=", "'====================================================================================================\\n\\n'", "if", "log", ":", "logging", ".", "info", "(", "output", ")", "else", ":", "print", "(", "output", ")", "return", "mean", ",", "var", ",", "cov", ",", "main_sens", ",", "total_sens" ]
https://github.com/ReactionMechanismGenerator/RMG-Py/blob/2b7baf51febf27157def58fb3f6cee03fb6a684c/rmgpy/tools/globaluncertainty.py#L417-L508
biolab/orange2
db40a9449cb45b507d63dcd5739b223f9cffb8e6
Orange/evaluation/scoring.py
python
split_by_iterations
(res)
return ress
Split ExperimentResults of a multiple iteration test into a list of ExperimentResults, one for each iteration.
Split ExperimentResults of a multiple iteration test into a list of ExperimentResults, one for each iteration.
[ "Split", "ExperimentResults", "of", "a", "multiple", "iteration", "test", "into", "a", "list", "of", "ExperimentResults", "one", "for", "each", "iteration", "." ]
def split_by_iterations(res):
    """Split ExperimentResults of a multiple iteration test into a list
    of ExperimentResults, one for each iteration.
    """
    if res.number_of_iterations < 2:
        return [res]

    ress = [Orange.evaluation.testing.ExperimentResults(
                1, res.classifier_names, res.class_values,
                res.weights, classifiers=res.classifiers,
                loaded=res.loaded, test_type=res.test_type, labels=res.labels)
            for _ in range(res.number_of_iterations)]
    for te in res.results:
        ress[te.iteration_number].results.append(te)
    return ress
[ "def", "split_by_iterations", "(", "res", ")", ":", "if", "res", ".", "number_of_iterations", "<", "2", ":", "return", "[", "res", "]", "ress", "=", "[", "Orange", ".", "evaluation", ".", "testing", ".", "ExperimentResults", "(", "1", ",", "res", ".", "classifier_names", ",", "res", ".", "class_values", ",", "res", ".", "weights", ",", "classifiers", "=", "res", ".", "classifiers", ",", "loaded", "=", "res", ".", "loaded", ",", "test_type", "=", "res", ".", "test_type", ",", "labels", "=", "res", ".", "labels", ")", "for", "_", "in", "range", "(", "res", ".", "number_of_iterations", ")", "]", "for", "te", "in", "res", ".", "results", ":", "ress", "[", "te", ".", "iteration_number", "]", ".", "results", ".", "append", "(", "te", ")", "return", "ress" ]
https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/evaluation/scoring.py#L84-L98
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/nova/nova/virt/xenapi/driver.py
python
XenAPIDriver.inject_file
(self, instance, b64_path, b64_contents)
Create a file on the VM instance. The file path and contents should be base64-encoded.
Create a file on the VM instance. The file path and contents should be base64-encoded.
[ "Create", "a", "file", "on", "the", "VM", "instance", ".", "The", "file", "path", "and", "contents", "should", "be", "base64", "-", "encoded", "." ]
def inject_file(self, instance, b64_path, b64_contents): """Create a file on the VM instance. The file path and contents should be base64-encoded. """ self._vmops.inject_file(instance, b64_path, b64_contents)
[ "def", "inject_file", "(", "self", ",", "instance", ",", "b64_path", ",", "b64_contents", ")", ":", "self", ".", "_vmops", ".", "inject_file", "(", "instance", ",", "b64_path", ",", "b64_contents", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/nova/nova/virt/xenapi/driver.py#L212-L216
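Editor's note: a sketch of preparing the base64-encoded arguments inject_file() expects. The path and contents are illustrative, and `driver`/`instance` are hypothetical stand-ins for an XenAPIDriver instance and a nova instance object.

import base64

b64_path = base64.b64encode(b"/tmp/injected.txt").decode("ascii")
b64_contents = base64.b64encode(b"hello from the host\n").decode("ascii")
# driver.inject_file(instance, b64_path, b64_contents)  # hypothetical call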
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_route.py
python
OpenShiftCLI._create
(self, fname)
return self.openshift_cmd(['create', '-f', fname])
call oc create on a filename
call oc create on a filename
[ "call", "oc", "create", "on", "a", "filename" ]
def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname])
[ "def", "_create", "(", "self", ",", "fname", ")", ":", "return", "self", ".", "openshift_cmd", "(", "[", "'create'", ",", "'-f'", ",", "fname", "]", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_route.py#L1003-L1005
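Editor's note: _create() shells out through openshift_cmd; a rough shell-level equivalent is the subprocess call below. The manifest path is illustrative and cluster-context flags are omitted.

import subprocess

subprocess.run(["oc", "create", "-f", "/tmp/route.yml"], check=True)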
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
examples/svm/plot_iris_svc.py
python
plot_contours
(ax, clf, xx, yy, **params)
return out
Plot the decision boundaries for a classifier. Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional
Plot the decision boundaries for a classifier.
[ "Plot", "the", "decision", "boundaries", "for", "a", "classifier", "." ]
def plot_contours(ax, clf, xx, yy, **params): """Plot the decision boundaries for a classifier. Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional """ Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, **params) return out
[ "def", "plot_contours", "(", "ax", ",", "clf", ",", "xx", ",", "yy", ",", "*", "*", "params", ")", ":", "Z", "=", "clf", ".", "predict", "(", "np", ".", "c_", "[", "xx", ".", "ravel", "(", ")", ",", "yy", ".", "ravel", "(", ")", "]", ")", "Z", "=", "Z", ".", "reshape", "(", "xx", ".", "shape", ")", "out", "=", "ax", ".", "contourf", "(", "xx", ",", "yy", ",", "Z", ",", "*", "*", "params", ")", "return", "out" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/examples/svm/plot_iris_svc.py#L61-L75
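Editor's note: a sketch of calling plot_contours() as defined above, with an SVC fitted on two iris features. The meshgrid construction is inlined here for self-containment (the original example builds it with a make_meshgrid helper).

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target
clf = svm.SVC(kernel="linear", C=1.0).fit(X, y)

xx, yy = np.meshgrid(np.arange(X[:, 0].min() - 1, X[:, 0].max() + 1, 0.02),
                     np.arange(X[:, 1].min() - 1, X[:, 1].max() + 1, 0.02))
fig, ax = plt.subplots()
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm, edgecolors="k")
plt.show()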
digidotcom/xbee-python
0757f4be0017530c205175fbee8f9f61be9614d1
digi/xbee/devices.py
python
DigiMeshNetwork.__init__
(self, device)
Class constructor. Instantiates a new `DigiMeshNetwork`. Args: device (:class:`.DigiMeshDevice`): Local DigiMesh node to get the network from. Raises: ValueError: If `device` is `None`.
Class constructor. Instantiates a new `DigiMeshNetwork`.
[ "Class", "constructor", ".", "Instantiates", "a", "new", "DigiMeshNetwork", "." ]
def __init__(self, device):
    """
    Class constructor. Instantiates a new `DigiMeshNetwork`.

    Args:
        device (:class:`.DigiMeshDevice`): Local DigiMesh node to get the
            network from.

    Raises:
        ValueError: If `device` is `None`.
    """
    super().__init__(device)

    self.__saved_no = None
    self.__saved_so = None
    self.__sync_sleep_enabled = False
    # Calculated timeout based on the 'N?' value of the local XBee and the
    # sleep configuration of the network.
    self.__real_node_timeout = None

    # Dictionary to store the neighbor find processes per node, so they
    # can be stopped when required.
    # The dictionary uses as key the 64-bit address string representation (to be thread-safe)
    self.__neighbor_finders = {}
[ "def", "__init__", "(", "self", ",", "device", ")", ":", "super", "(", ")", ".", "__init__", "(", "device", ")", "self", ".", "__saved_no", "=", "None", "self", ".", "__saved_so", "=", "None", "self", ".", "__sync_sleep_enabled", "=", "False", "# Calculated timeout based on the 'N?' value of the local XBee and the", "# sleep configuration of the network.", "self", ".", "__real_node_timeout", "=", "None", "# Dictionary to store the neighbor find processes per node, so they", "# can be stopped when required.", "# The dictionary uses as key the 64-bit address string representation (to be thread-safe)", "self", ".", "__neighbor_finders", "=", "{", "}" ]
https://github.com/digidotcom/xbee-python/blob/0757f4be0017530c205175fbee8f9f61be9614d1/digi/xbee/devices.py#L11620-L11645
JacquesLucke/animation_nodes
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
animation_nodes/auto_load.py
python
iter_my_classes
(modules)
[]
def iter_my_classes(modules): base_types = get_register_base_types() for cls in get_classes_in_modules(modules): if any(base in base_types for base in cls.__bases__): if not getattr(cls, "is_registered", False): yield cls
[ "def", "iter_my_classes", "(", "modules", ")", ":", "base_types", "=", "get_register_base_types", "(", ")", "for", "cls", "in", "get_classes_in_modules", "(", "modules", ")", ":", "if", "any", "(", "base", "in", "base_types", "for", "base", "in", "cls", ".", "__bases__", ")", ":", "if", "not", "getattr", "(", "cls", ",", "\"is_registered\"", ",", "False", ")", ":", "yield", "cls" ]
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/auto_load.py#L106-L111
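Editor's note: a sketch of how this generator is typically consumed during add-on registration (Blender-only; `modules` is assumed to be the add-on's imported module list, mirroring the auto_load pattern the file implements).

import bpy

def register(modules):
    # Register every not-yet-registered class found in the add-on's modules.
    for cls in iter_my_classes(modules):
        bpy.utils.register_class(cls)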
shlomif/PySolFC
780c399e6f68a95916d84e7e88a067e8fcbec1cc
pysollib/game/__init__.py
python
Game._calcMouseBind
(self, binding_format)
return self.app.opt.calcCustomMouseButtonsBinding(binding_format)
Return the mouse binding for `binding_format`, resolved from the app's custom mouse-button options.
Return the mouse binding for `binding_format`, resolved from the app's custom mouse-button options.
[ "Return", "the", "mouse", "binding", "for", "binding_format", "resolved", "from", "the", "app", "'s", "custom", "mouse", "-", "button", "options", "." ]
def _calcMouseBind(self, binding_format):
    """Return the mouse binding for `binding_format`, resolved from the
    app's custom mouse-button options."""
    return self.app.opt.calcCustomMouseButtonsBinding(binding_format)
[ "def", "_calcMouseBind", "(", "self", ",", "binding_format", ")", ":", "return", "self", ".", "app", ".", "opt", ".", "calcCustomMouseButtonsBinding", "(", "binding_format", ")" ]
https://github.com/shlomif/PySolFC/blob/780c399e6f68a95916d84e7e88a067e8fcbec1cc/pysollib/game/__init__.py#L677-L679
bgshih/aster
0c74b9b9cd22a021c1d57b75a3041bcbd7ace3cc
trainer.py
python
train
(create_tensor_dict_fn_list, create_model_fn, train_config, master, task, num_clones, worker_replicas, clone_on_cpu, ps_tasks, worker_job_name, is_chief, train_dir)
Training function for models. Args: create_tensor_dict_fn_list: a list of functions, each creating a tensor input dictionary. create_model_fn: a function that creates a DetectionModel and generates losses. train_config: a train_pb2.TrainConfig protobuf. master: BNS name of the TensorFlow master to use. task: The task id of this training instance. num_clones: The number of clones to run per machine. worker_replicas: The number of worker replicas to train with. clone_on_cpu: True if clones should be forced to run on CPU. ps_tasks: Number of parameter server tasks. worker_job_name: Name of the worker job. is_chief: Whether this replica is the chief replica. train_dir: Directory to write checkpoints and training summaries to.
Training function for models. Args: create_tensor_dict_fn_list: a list of functions, each creating a tensor input dictionary. create_model_fn: a function that creates a DetectionModel and generates losses. train_config: a train_pb2.TrainConfig protobuf. master: BNS name of the TensorFlow master to use. task: The task id of this training instance. num_clones: The number of clones to run per machine. worker_replicas: The number of worker replicas to train with. clone_on_cpu: True if clones should be forced to run on CPU. ps_tasks: Number of parameter server tasks. worker_job_name: Name of the worker job. is_chief: Whether this replica is the chief replica. train_dir: Directory to write checkpoints and training summaries to.
[ "Training", "function", "for", "models", ".", "Args", ":", "create_tensor_dict_fn_list", ":", "a", "list", "of", "functions", "each", "creating", "a", "tensor", "input", "dictionary", ".", "create_model_fn", ":", "a", "function", "that", "creates", "a", "DetectionModel", "and", "generates", "losses", ".", "train_config", ":", "a", "train_pb2", ".", "TrainConfig", "protobuf", ".", "master", ":", "BNS", "name", "of", "the", "TensorFlow", "master", "to", "use", ".", "task", ":", "The", "task", "id", "of", "this", "training", "instance", ".", "num_clones", ":", "The", "number", "of", "clones", "to", "run", "per", "machine", ".", "worker_replicas", ":", "The", "number", "of", "worker", "replicas", "to", "train", "with", ".", "clone_on_cpu", ":", "True", "if", "clones", "should", "be", "forced", "to", "run", "on", "CPU", ".", "ps_tasks", ":", "Number", "of", "parameter", "server", "tasks", ".", "worker_job_name", ":", "Name", "of", "the", "worker", "job", ".", "is_chief", ":", "Whether", "this", "replica", "is", "the", "chief", "replica", ".", "train_dir", ":", "Directory", "to", "write", "checkpoints", "and", "training", "summaries", "to", "." ]
def train(create_tensor_dict_fn_list, create_model_fn, train_config, master, task, num_clones, worker_replicas, clone_on_cpu, ps_tasks, worker_job_name, is_chief, train_dir): """Training function for models. Args: create_tensor_dict_fn: a function to create a tensor input dictionary. create_model_fn: a function that creates a DetectionModel and generates losses. train_config: a train_pb2.TrainConfig protobuf. master: BNS name of the TensorFlow master to use. task: The task id of this training instance. num_clones: The number of clones to run per machine. worker_replicas: The number of work replicas to train with. clone_on_cpu: True if clones should be forced to run on CPU. ps_tasks: Number of parameter server tasks. worker_job_name: Name of the worker job. is_chief: Whether this replica is the chief replica. train_dir: Directory to write checkpoints and training summaries to. """ data_augmentation_options = [ preprocessor_builder.build(step) for step in train_config.data_augmentation_options ] with tf.Graph().as_default(): # Build a configuration specifying multi-GPU and multi-replicas. deploy_config = model_deploy.DeploymentConfig( num_clones=num_clones, clone_on_cpu=clone_on_cpu, replica_id=task, num_replicas=worker_replicas, num_ps_tasks=ps_tasks, worker_job_name=worker_job_name ) # Place the global step on the device storing the variables. with tf.device(deploy_config.variables_device()): global_step = tf.train.create_global_step() with tf.device(deploy_config.inputs_device()), \ tf.name_scope('Input'): input_queue_list = [] for i, create_tensor_dict_fn in enumerate(create_tensor_dict_fn_list): input_queue_list.append(_create_input_queue( train_config.batch_size[i] // num_clones, create_tensor_dict_fn, train_config.batch_queue_capacity, train_config.num_batch_queue_threads, train_config.prefetch_queue_capacity, data_augmentation_options )) # Gather initial summaries. summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) global_summaries = set([]) model_fn = functools.partial(_create_losses, create_model_fn=create_model_fn) clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue_list]) first_clone_scope = clones[0].scope # Gather update_ops from the first clone. These contain, for example, # the updates for the batch_norm variables created by model_fn. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) with tf.device(deploy_config.optimizer_device()), \ tf.name_scope('Optimizer'): training_optimizer = optimizer_builder.build( train_config.optimizer, global_summaries ) sync_optimizer = None if train_config.sync_replicas: training_optimizer = tf.train.SyncReplicasOptimizer( training_optimizer, replicas_to_aggregate=train_config.replicas_to_aggregate, total_num_replicas=train_config.worker_replicas ) sync_optimizer = training_optimizer # Create ops required to initialize the model from a given checkpoint. 
init_fn = None if train_config.fine_tune_checkpoint: var_map = detection_model.restore_map( from_detection_checkpoint=train_config.from_detection_checkpoint ) available_var_map = variables_helper.get_variables_available_in_checkpoint( var_map, train_config.fine_tune_checkpoint ) init_saver = tf.train.Saver(available_var_map) def initializer_fn(sess): init_saver.restore(sess, train_config.fine_tune_checkpoint) init_fn = initializer_fn with tf.device(deploy_config.optimizer_device()), \ tf.variable_scope('OptimizeClones'): total_loss, grads_and_vars = model_deploy.optimize_clones( clones, training_optimizer, regularization_losses=None ) total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.') # Optionally multiply bias gradients by train_config.bias_grad_multiplier. if train_config.bias_grad_multiplier: biases_regex_list = [r'.*bias(?:es)?', r'.*beta'] grads_and_vars = variables_helper.multiply_gradients_matching_regex( grads_and_vars, biases_regex_list, multiplier=train_config.bias_grad_multiplier ) # Optionally freeze some layers by setting their gradients to be zero. if train_config.freeze_variables: grads_and_vars = variables_helper.freeze_gradients_matching_regex( grads_and_vars, train_config.freeze_variables ) # Optionally clip gradients if train_config.gradient_clipping_by_norm > 0: with tf.name_scope('clip_grads'): grads_and_vars = tf.contrib.training.clip_gradient_norms( grads_and_vars, train_config.gradient_clipping_by_norm ) # Create gradient updates. grad_updates = training_optimizer.apply_gradients( grads_and_vars, global_step=global_step ) update_ops.append(grad_updates) update_op = tf.group(*update_ops) with tf.control_dependencies([update_op]): train_tensor = tf.identity(total_loss, name='train_op') # Add summaries. for (grad, var) in grads_and_vars: var_name = var.op.name grad_name = 'grad/' + var_name global_summaries.add(tf.summary.histogram(grad_name, grad)) global_summaries.add(tf.summary.histogram(var_name, var)) # for model_var in tf.contrib.framework.get_model_variables(): # global_summaries.add(tf.summary.histogram(model_var.op.name, model_var)) for loss_tensor in tf.losses.get_losses(): global_summaries.add(tf.summary.scalar(loss_tensor.op.name, loss_tensor)) global_summaries.add( tf.summary.scalar('TotalLoss', tf.losses.get_total_loss()) ) # Add the summaries from the first clone. These contain the summaries # created by model_fn and either optimize_clones() or _gather_clone_loss(). summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope)) summaries |= global_summaries # Merge all summaries together. summary_op = tf.summary.merge(list(summaries), name='summary_op') # Soft placement allows placing on CPU ops without GPU implementation. session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) # Save checkpoints regularly. 
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours saver = tf.train.Saver( keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours ) scaffold = tf.train.Scaffold( init_fn=init_fn, summary_op=summary_op, saver=saver ) stop_hook = tf.train.StopAtStepHook( num_steps=(train_config.num_steps if train_config.num_steps else None), ) profile_hook = profile_session_run_hooks.ProfileAtStepHook( at_step=200, checkpoint_dir=train_dir) tf.contrib.training.train( train_tensor, train_dir, master=master, is_chief=is_chief, scaffold=scaffold, hooks=[stop_hook, profile_hook], chief_only_hooks=None, save_checkpoint_secs=train_config.save_checkpoint_secs, save_summaries_steps=train_config.save_summaries_steps, config=session_config )
[ "def", "train", "(", "create_tensor_dict_fn_list", ",", "create_model_fn", ",", "train_config", ",", "master", ",", "task", ",", "num_clones", ",", "worker_replicas", ",", "clone_on_cpu", ",", "ps_tasks", ",", "worker_job_name", ",", "is_chief", ",", "train_dir", ")", ":", "data_augmentation_options", "=", "[", "preprocessor_builder", ".", "build", "(", "step", ")", "for", "step", "in", "train_config", ".", "data_augmentation_options", "]", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "# Build a configuration specifying multi-GPU and multi-replicas.", "deploy_config", "=", "model_deploy", ".", "DeploymentConfig", "(", "num_clones", "=", "num_clones", ",", "clone_on_cpu", "=", "clone_on_cpu", ",", "replica_id", "=", "task", ",", "num_replicas", "=", "worker_replicas", ",", "num_ps_tasks", "=", "ps_tasks", ",", "worker_job_name", "=", "worker_job_name", ")", "# Place the global step on the device storing the variables.", "with", "tf", ".", "device", "(", "deploy_config", ".", "variables_device", "(", ")", ")", ":", "global_step", "=", "tf", ".", "train", ".", "create_global_step", "(", ")", "with", "tf", ".", "device", "(", "deploy_config", ".", "inputs_device", "(", ")", ")", ",", "tf", ".", "name_scope", "(", "'Input'", ")", ":", "input_queue_list", "=", "[", "]", "for", "i", ",", "create_tensor_dict_fn", "in", "enumerate", "(", "create_tensor_dict_fn_list", ")", ":", "input_queue_list", ".", "append", "(", "_create_input_queue", "(", "train_config", ".", "batch_size", "[", "i", "]", "//", "num_clones", ",", "create_tensor_dict_fn", ",", "train_config", ".", "batch_queue_capacity", ",", "train_config", ".", "num_batch_queue_threads", ",", "train_config", ".", "prefetch_queue_capacity", ",", "data_augmentation_options", ")", ")", "# Gather initial summaries.", "summaries", "=", "set", "(", "tf", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "SUMMARIES", ")", ")", "global_summaries", "=", "set", "(", "[", "]", ")", "model_fn", "=", "functools", ".", "partial", "(", "_create_losses", ",", "create_model_fn", "=", "create_model_fn", ")", "clones", "=", "model_deploy", ".", "create_clones", "(", "deploy_config", ",", "model_fn", ",", "[", "input_queue_list", "]", ")", "first_clone_scope", "=", "clones", "[", "0", "]", ".", "scope", "# Gather update_ops from the first clone. 
These contain, for example,", "# the updates for the batch_norm variables created by model_fn.", "update_ops", "=", "tf", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "UPDATE_OPS", ",", "first_clone_scope", ")", "with", "tf", ".", "device", "(", "deploy_config", ".", "optimizer_device", "(", ")", ")", ",", "tf", ".", "name_scope", "(", "'Optimizer'", ")", ":", "training_optimizer", "=", "optimizer_builder", ".", "build", "(", "train_config", ".", "optimizer", ",", "global_summaries", ")", "sync_optimizer", "=", "None", "if", "train_config", ".", "sync_replicas", ":", "training_optimizer", "=", "tf", ".", "train", ".", "SyncReplicasOptimizer", "(", "training_optimizer", ",", "replicas_to_aggregate", "=", "train_config", ".", "replicas_to_aggregate", ",", "total_num_replicas", "=", "train_config", ".", "worker_replicas", ")", "sync_optimizer", "=", "training_optimizer", "# Create ops required to initialize the model from a given checkpoint.", "init_fn", "=", "None", "if", "train_config", ".", "fine_tune_checkpoint", ":", "var_map", "=", "detection_model", ".", "restore_map", "(", "from_detection_checkpoint", "=", "train_config", ".", "from_detection_checkpoint", ")", "available_var_map", "=", "variables_helper", ".", "get_variables_available_in_checkpoint", "(", "var_map", ",", "train_config", ".", "fine_tune_checkpoint", ")", "init_saver", "=", "tf", ".", "train", ".", "Saver", "(", "available_var_map", ")", "def", "initializer_fn", "(", "sess", ")", ":", "init_saver", ".", "restore", "(", "sess", ",", "train_config", ".", "fine_tune_checkpoint", ")", "init_fn", "=", "initializer_fn", "with", "tf", ".", "device", "(", "deploy_config", ".", "optimizer_device", "(", ")", ")", ",", "tf", ".", "variable_scope", "(", "'OptimizeClones'", ")", ":", "total_loss", ",", "grads_and_vars", "=", "model_deploy", ".", "optimize_clones", "(", "clones", ",", "training_optimizer", ",", "regularization_losses", "=", "None", ")", "total_loss", "=", "tf", ".", "check_numerics", "(", "total_loss", ",", "'LossTensor is inf or nan.'", ")", "# Optionally multiply bias gradients by train_config.bias_grad_multiplier.", "if", "train_config", ".", "bias_grad_multiplier", ":", "biases_regex_list", "=", "[", "r'.*bias(?:es)?'", ",", "r'.*beta'", "]", "grads_and_vars", "=", "variables_helper", ".", "multiply_gradients_matching_regex", "(", "grads_and_vars", ",", "biases_regex_list", ",", "multiplier", "=", "train_config", ".", "bias_grad_multiplier", ")", "# Optionally freeze some layers by setting their gradients to be zero.", "if", "train_config", ".", "freeze_variables", ":", "grads_and_vars", "=", "variables_helper", ".", "freeze_gradients_matching_regex", "(", "grads_and_vars", ",", "train_config", ".", "freeze_variables", ")", "# Optionally clip gradients", "if", "train_config", ".", "gradient_clipping_by_norm", ">", "0", ":", "with", "tf", ".", "name_scope", "(", "'clip_grads'", ")", ":", "grads_and_vars", "=", "tf", ".", "contrib", ".", "training", ".", "clip_gradient_norms", "(", "grads_and_vars", ",", "train_config", ".", "gradient_clipping_by_norm", ")", "# Create gradient updates.", "grad_updates", "=", "training_optimizer", ".", "apply_gradients", "(", "grads_and_vars", ",", "global_step", "=", "global_step", ")", "update_ops", ".", "append", "(", "grad_updates", ")", "update_op", "=", "tf", ".", "group", "(", "*", "update_ops", ")", "with", "tf", ".", "control_dependencies", "(", "[", "update_op", "]", ")", ":", "train_tensor", "=", "tf", ".", "identity", "(", "total_loss", ",", "name", "=", 
"'train_op'", ")", "# Add summaries.", "for", "(", "grad", ",", "var", ")", "in", "grads_and_vars", ":", "var_name", "=", "var", ".", "op", ".", "name", "grad_name", "=", "'grad/'", "+", "var_name", "global_summaries", ".", "add", "(", "tf", ".", "summary", ".", "histogram", "(", "grad_name", ",", "grad", ")", ")", "global_summaries", ".", "add", "(", "tf", ".", "summary", ".", "histogram", "(", "var_name", ",", "var", ")", ")", "# for model_var in tf.contrib.framework.get_model_variables():", "# global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))", "for", "loss_tensor", "in", "tf", ".", "losses", ".", "get_losses", "(", ")", ":", "global_summaries", ".", "add", "(", "tf", ".", "summary", ".", "scalar", "(", "loss_tensor", ".", "op", ".", "name", ",", "loss_tensor", ")", ")", "global_summaries", ".", "add", "(", "tf", ".", "summary", ".", "scalar", "(", "'TotalLoss'", ",", "tf", ".", "losses", ".", "get_total_loss", "(", ")", ")", ")", "# Add the summaries from the first clone. These contain the summaries", "# created by model_fn and either optimize_clones() or _gather_clone_loss().", "summaries", "|=", "set", "(", "tf", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "SUMMARIES", ",", "first_clone_scope", ")", ")", "summaries", "|=", "global_summaries", "# Merge all summaries together.", "summary_op", "=", "tf", ".", "summary", ".", "merge", "(", "list", "(", "summaries", ")", ",", "name", "=", "'summary_op'", ")", "# Soft placement allows placing on CPU ops without GPU implementation.", "session_config", "=", "tf", ".", "ConfigProto", "(", "allow_soft_placement", "=", "True", ",", "log_device_placement", "=", "False", ")", "# Save checkpoints regularly.", "keep_checkpoint_every_n_hours", "=", "train_config", ".", "keep_checkpoint_every_n_hours", "saver", "=", "tf", ".", "train", ".", "Saver", "(", "keep_checkpoint_every_n_hours", "=", "keep_checkpoint_every_n_hours", ")", "scaffold", "=", "tf", ".", "train", ".", "Scaffold", "(", "init_fn", "=", "init_fn", ",", "summary_op", "=", "summary_op", ",", "saver", "=", "saver", ")", "stop_hook", "=", "tf", ".", "train", ".", "StopAtStepHook", "(", "num_steps", "=", "(", "train_config", ".", "num_steps", "if", "train_config", ".", "num_steps", "else", "None", ")", ",", ")", "profile_hook", "=", "profile_session_run_hooks", ".", "ProfileAtStepHook", "(", "at_step", "=", "200", ",", "checkpoint_dir", "=", "train_dir", ")", "tf", ".", "contrib", ".", "training", ".", "train", "(", "train_tensor", ",", "train_dir", ",", "master", "=", "master", ",", "is_chief", "=", "is_chief", ",", "scaffold", "=", "scaffold", ",", "hooks", "=", "[", "stop_hook", ",", "profile_hook", "]", ",", "chief_only_hooks", "=", "None", ",", "save_checkpoint_secs", "=", "train_config", ".", "save_checkpoint_secs", ",", "save_summaries_steps", "=", "train_config", ".", "save_summaries_steps", ",", "config", "=", "session_config", ")" ]
https://github.com/bgshih/aster/blob/0c74b9b9cd22a021c1d57b75a3041bcbd7ace3cc/trainer.py#L77-L269
worthwhile/django-herald
c0b0f4e894b26ec46647d1c70a6a62a89f34b06a
herald/base.py
python
NotificationBase.get_extra_data
(self)
return {}
Returns a dictionary of extra data to be stored, and used for sending. MUST BE JSON SERIALIZABLE
Returns a dictionary of extra data to be stored, and used for sending. MUST BE JSON SERIALIZABLE
[ "Returns", "a", "dictionary", "of", "extra", "data", "to", "be", "stored", "and", "used", "for", "sending", ".", "MUST", "BE", "JSON", "SERIALIZABLE" ]
def get_extra_data(self): """ Returns a dictionary of extra data to be stored, and used for sending. MUST BE JSON SERIALIZABLE """ return {}
[ "def", "get_extra_data", "(", "self", ")", ":", "return", "{", "}" ]
https://github.com/worthwhile/django-herald/blob/c0b0f4e894b26ec46647d1c70a6a62a89f34b06a/herald/base.py#L140-L146
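Editor's note: a sketch of overriding this hook in a subclass. WelcomeEmail is hypothetical; the only contract shown above is that the returned dict must stay JSON-serializable.

from herald.base import NotificationBase

class WelcomeEmail(NotificationBase):  # hypothetical subclass
    def get_extra_data(self):
        return {"campaign": "onboarding", "attempt": 1}  # JSON-serializable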
smicallef/spiderfoot
fd4bf9394c9ab3ecc90adc3115c56349fb23165b
modules/sfp_subdomain_takeover.py
python
sfp_subdomain_takeover.watchedEvents
(self)
return ["AFFILIATE_INTERNET_NAME", "AFFILIATE_INTERNET_NAME_UNRESOLVED"]
[]
def watchedEvents(self): return ["AFFILIATE_INTERNET_NAME", "AFFILIATE_INTERNET_NAME_UNRESOLVED"]
[ "def", "watchedEvents", "(", "self", ")", ":", "return", "[", "\"AFFILIATE_INTERNET_NAME\"", ",", "\"AFFILIATE_INTERNET_NAME_UNRESOLVED\"", "]" ]
https://github.com/smicallef/spiderfoot/blob/fd4bf9394c9ab3ecc90adc3115c56349fb23165b/modules/sfp_subdomain_takeover.py#L72-L73
missionpinball/mpf
8e6b74cff4ba06d2fec9445742559c1068b88582
mpf/core/ball_controller.py
python
BallController._collecting_balls_complete
(self)
event: collecting_balls_complete desc: Posted by the ball controller when it has finished the collecting balls process.
event: collecting_balls_complete
[ "event", ":", "collecting_balls_complete" ]
def _collecting_balls_complete(self) -> None: self.machine.events.remove_handler(self._collecting_balls_entered_callback) self.machine.events.post('collecting_balls_complete') '''event: collecting_balls_complete desc: Posted by the ball controller when it has finished the collecting balls process. '''
[ "def", "_collecting_balls_complete", "(", "self", ")", "->", "None", ":", "self", ".", "machine", ".", "events", ".", "remove_handler", "(", "self", ".", "_collecting_balls_entered_callback", ")", "self", ".", "machine", ".", "events", ".", "post", "(", "'collecting_balls_complete'", ")" ]
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/core/ball_controller.py#L354-L362
deepfakes/faceswap
09c7d8aca3c608d1afad941ea78e9fd9b64d9219
lib/gui/custom_widgets.py
python
ContextMenu.cm_bind
(self)
Bind the menu to the given widget's right-click event. After associating a widget with this :class:`ContextMenu`, this function should be called to bind it to the right-click button.
Bind the menu to the given widget's right-click event.
[ "Bind", "the", "menu", "to", "the", "given", "widget", "'s", "right", "-", "click", "event", "." ]
def cm_bind(self):
    """ Bind the menu to the given widget's right-click event.

    After associating a widget with this :class:`ContextMenu`, this function
    should be called to bind it to the right-click button. """
    button = "<Button-2>" if platform.system() == "Darwin" else "<Button-3>"
    logger.debug("Binding '%s' to '%s'", button, self._widget.winfo_class())
    self._widget.bind(button, lambda event: self.tk_popup(event.x_root, event.y_root))
[ "def", "cm_bind", "(", "self", ")", ":", "button", "=", "\"<Button-2>\"", "if", "platform", ".", "system", "(", ")", "==", "\"Darwin\"", "else", "\"<Button-3>\"", "logger", ".", "debug", "(", "\"Binding '%s' to '%s'\"", ",", "button", ",", "self", ".", "_widget", ".", "winfo_class", "(", ")", ")", "self", ".", "_widget", ".", "bind", "(", "button", ",", "lambda", "event", ":", "self", ".", "tk_popup", "(", "event", ".", "x_root", ",", "event", ".", "y_root", ")", ")" ]
https://github.com/deepfakes/faceswap/blob/09c7d8aca3c608d1afad941ea78e9fd9b64d9219/lib/gui/custom_widgets.py#L53-L61
mgrankin/ru_transformers
e698092ed31b340dc39ecea26f1b0be87cdbd022
sp_encoder.py
python
SPEncoder.convert_tokens_to_ids
(self, tokens)
return [self.sp.PieceToId(token) for token in tokens]
[]
def convert_tokens_to_ids(self, tokens): return [self.sp.PieceToId(token) for token in tokens]
[ "def", "convert_tokens_to_ids", "(", "self", ",", "tokens", ")", ":", "return", "[", "self", ".", "sp", ".", "PieceToId", "(", "token", ")", "for", "token", "in", "tokens", "]" ]
https://github.com/mgrankin/ru_transformers/blob/e698092ed31b340dc39ecea26f1b0be87cdbd022/sp_encoder.py#L53-L54
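The record above is a one-line wrapper over sentencepiece's PieceToId. A minimal sketch of the same piece-to-id mapping using the sentencepiece library directly; the model file name m.model is a placeholder, and the exact pieces depend on the trained model:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("m.model")                         # placeholder model file, assumed already trained
pieces = sp.EncodeAsPieces("hello world")  # text -> subword pieces
ids = [sp.PieceToId(p) for p in pieces]    # the same mapping the record's method applies
print(pieces, ids, sp.DecodeIds(ids))      # the ids decode back to the original text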
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
rpython/annotator/classdesc.py
python
ClassDesc.mergeattrfamilies
(self, others, attrname)
return changed
Merge the attr families of the given Descs into one.
Merge the attr families of the given Descs into one.
[ "Merge", "the", "attr", "families", "of", "the", "given", "Descs", "into", "one", "." ]
def mergeattrfamilies(self, others, attrname): """Merge the attr families of the given Descs into one.""" access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) changed, rep, attrfamily = access_sets.find(self) for desc in others: changed1, rep, attrfamily = access_sets.union(rep, desc) changed = changed or changed1 return changed
[ "def", "mergeattrfamilies", "(", "self", ",", "others", ",", "attrname", ")", ":", "access_sets", "=", "self", ".", "bookkeeper", ".", "get_classpbc_attr_families", "(", "attrname", ")", "changed", ",", "rep", ",", "attrfamily", "=", "access_sets", ".", "find", "(", "self", ")", "for", "desc", "in", "others", ":", "changed1", ",", "rep", ",", "attrfamily", "=", "access_sets", ".", "union", "(", "rep", ",", "desc", ")", "changed", "=", "changed", "or", "changed1", "return", "changed" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/rpython/annotator/classdesc.py#L935-L942
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/scheduler/utils.py
python
_log_compute_error
(instance_uuid, retry)
If the request contained an exception from a previous compute build/resize operation, log it to aid debugging
If the request contained an exception from a previous compute build/resize operation, log it to aid debugging
[ "If", "the", "request", "contained", "an", "exception", "from", "a", "previous", "compute", "build", "/", "resize", "operation", "log", "it", "to", "aid", "debugging" ]
def _log_compute_error(instance_uuid, retry): """If the request contained an exception from a previous compute build/resize operation, log it to aid debugging """ exc = retry.get('exc') # string-ified exception from compute if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts', None) if not hosts: return # no previously attempted hosts, skip last_host, last_node = hosts[-1] LOG.error( 'Error from last host: %(last_host)s (node %(last_node)s): %(exc)s', {'last_host': last_host, 'last_node': last_node, 'exc': exc}, instance_uuid=instance_uuid)
[ "def", "_log_compute_error", "(", "instance_uuid", ",", "retry", ")", ":", "exc", "=", "retry", ".", "get", "(", "'exc'", ")", "# string-ified exception from compute", "if", "not", "exc", ":", "return", "# no exception info from a previous attempt, skip", "hosts", "=", "retry", ".", "get", "(", "'hosts'", ",", "None", ")", "if", "not", "hosts", ":", "return", "# no previously attempted hosts, skip", "last_host", ",", "last_node", "=", "hosts", "[", "-", "1", "]", "LOG", ".", "error", "(", "'Error from last host: %(last_host)s (node %(last_node)s): %(exc)s'", ",", "{", "'last_host'", ":", "last_host", ",", "'last_node'", ":", "last_node", ",", "'exc'", ":", "exc", "}", ",", "instance_uuid", "=", "instance_uuid", ")" ]
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/scheduler/utils.py#L992-L1008
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/celery/backends/redis.py
python
RedisBackend.incr
(self, key)
return self.client.incr(key)
[]
def incr(self, key): return self.client.incr(key)
[ "def", "incr", "(", "self", ",", "key", ")", ":", "return", "self", ".", "client", ".", "incr", "(", "key", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/celery/backends/redis.py#L218-L219
flairNLP/flair
b774774752c8338aab3d620f7e5062f66ec7a69d
flair/datasets/entity_linking.py
python
WSD_MASC.__init__
( self, base_path: Union[str, Path] = None, in_memory: bool = True, columns={0: "text", 3: "wn30_key"}, tag_to_bioes=None, label_name_map: Dict[str, str] = None, banned_sentences: List[str] = None, sample_missing_splits: Union[bool, str] = True, cut_multisense: bool = True, use_raganato_ALL_as_test_data: bool = False, )
Initialize MASC (Manually Annotated Sub-Corpus) provided in UFSAC https://github.com/getalp/UFSAC When first initializing the corpus the whole UFSAC data is downloaded.
Initialize MASC (Manually Annotated Sub-Corpus) provided in UFSAC https://github.com/getalp/UFSAC When first initializing the corpus the whole UFSAC data is downloaded.
[ "Initialize", "MASC", "(", "Manually", "Annotated", "Sub", "-", "Corpus", ")", "provided", "in", "UFSAC", "https", ":", "//", "github", ".", "com", "/", "getalp", "/", "UFSAC", "When", "first", "initializing", "the", "corpus", "the", "whole", "UFSAC", "data", "is", "downloaded", "." ]
def __init__( self, base_path: Union[str, Path] = None, in_memory: bool = True, columns={0: "text", 3: "wn30_key"}, tag_to_bioes=None, label_name_map: Dict[str, str] = None, banned_sentences: List[str] = None, sample_missing_splits: Union[bool, str] = True, cut_multisense: bool = True, use_raganato_ALL_as_test_data: bool = False, ): """ Initialize MASC (Manually Annotated Sub-Corpus) provided in UFSAC https://github.com/getalp/UFSAC When first initializing the corpus the whole UFSAC data is downloaded. """ if not base_path: base_path = flair.cache_root / "datasets" else: base_path = Path(base_path) dataset_name = "wsd_ufsac" # default dataset folder is the cache root data_folder = base_path / dataset_name original_data_folder = data_folder / "original_data" # We check if the the UFSAC data has already been downloaded. If not, we download it. # Note that this downloads more datasets than just MASC. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked) if not original_data_folder.exists(): # create folder data_folder.mkdir(parents=True) # download data import gdown url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO" output = data_folder / (dataset_name + ".tar") gdown.download(url, str(output), quiet=False) output = data_folder / (dataset_name + ".tar") unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False) os.rename(data_folder / "ufsac-public-2.1", original_data_folder) if use_raganato_ALL_as_test_data: # in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled. if sample_missing_splits: sample_missing_splits = "only_dev" # generate the test file test_file = determine_tsv_file( filename="raganato_ALL", data_folder=data_folder, cut_multisense=cut_multisense, ) else: test_file = None train_file = determine_tsv_file(filename="masc", data_folder=data_folder, cut_multisense=cut_multisense) super(WSD_MASC, self).__init__( data_folder=data_folder, columns=columns, train_file=train_file, test_file=test_file, in_memory=in_memory, document_separator_token="-DOCSTART-", column_delimiter="\t", autofind_splits=False, tag_to_bioes=tag_to_bioes, label_name_map=label_name_map, banned_sentences=banned_sentences, sample_missing_splits=sample_missing_splits, )
[ "def", "__init__", "(", "self", ",", "base_path", ":", "Union", "[", "str", ",", "Path", "]", "=", "None", ",", "in_memory", ":", "bool", "=", "True", ",", "columns", "=", "{", "0", ":", "\"text\"", ",", "3", ":", "\"wn30_key\"", "}", ",", "tag_to_bioes", "=", "None", ",", "label_name_map", ":", "Dict", "[", "str", ",", "str", "]", "=", "None", ",", "banned_sentences", ":", "List", "[", "str", "]", "=", "None", ",", "sample_missing_splits", ":", "Union", "[", "bool", ",", "str", "]", "=", "True", ",", "cut_multisense", ":", "bool", "=", "True", ",", "use_raganato_ALL_as_test_data", ":", "bool", "=", "False", ",", ")", ":", "if", "not", "base_path", ":", "base_path", "=", "flair", ".", "cache_root", "/", "\"datasets\"", "else", ":", "base_path", "=", "Path", "(", "base_path", ")", "dataset_name", "=", "\"wsd_ufsac\"", "# default dataset folder is the cache root", "data_folder", "=", "base_path", "/", "dataset_name", "original_data_folder", "=", "data_folder", "/", "\"original_data\"", "# We check if the the UFSAC data has already been downloaded. If not, we download it.", "# Note that this downloads more datasets than just MASC. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)", "if", "not", "original_data_folder", ".", "exists", "(", ")", ":", "# create folder", "data_folder", ".", "mkdir", "(", "parents", "=", "True", ")", "# download data", "import", "gdown", "url", "=", "\"https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO\"", "output", "=", "data_folder", "/", "(", "dataset_name", "+", "\".tar\"", ")", "gdown", ".", "download", "(", "url", ",", "str", "(", "output", ")", ",", "quiet", "=", "False", ")", "output", "=", "data_folder", "/", "(", "dataset_name", "+", "\".tar\"", ")", "unpack_file", "(", "file", "=", "output", ",", "unpack_to", "=", "data_folder", ",", "mode", "=", "\"tar\"", ",", "keep", "=", "False", ")", "os", ".", "rename", "(", "data_folder", "/", "\"ufsac-public-2.1\"", ",", "original_data_folder", ")", "if", "use_raganato_ALL_as_test_data", ":", "# in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled.", "if", "sample_missing_splits", ":", "sample_missing_splits", "=", "\"only_dev\"", "# generate the test file", "test_file", "=", "determine_tsv_file", "(", "filename", "=", "\"raganato_ALL\"", ",", "data_folder", "=", "data_folder", ",", "cut_multisense", "=", "cut_multisense", ",", ")", "else", ":", "test_file", "=", "None", "train_file", "=", "determine_tsv_file", "(", "filename", "=", "\"masc\"", ",", "data_folder", "=", "data_folder", ",", "cut_multisense", "=", "cut_multisense", ")", "super", "(", "WSD_MASC", ",", "self", ")", ".", "__init__", "(", "data_folder", "=", "data_folder", ",", "columns", "=", "columns", ",", "train_file", "=", "train_file", ",", "test_file", "=", "test_file", ",", "in_memory", "=", "in_memory", ",", "document_separator_token", "=", "\"-DOCSTART-\"", ",", "column_delimiter", "=", "\"\\t\"", ",", "autofind_splits", "=", "False", ",", "tag_to_bioes", "=", "tag_to_bioes", ",", "label_name_map", "=", "label_name_map", ",", "banned_sentences", "=", "banned_sentences", ",", "sample_missing_splits", "=", "sample_missing_splits", ",", ")" ]
https://github.com/flairNLP/flair/blob/b774774752c8338aab3d620f7e5062f66ec7a69d/flair/datasets/entity_linking.py#L1617-L1693
007gzs/dingtalk-sdk
7979da2e259fdbc571728cae2425a04dbc65850a
dingtalk/client/api/taobao.py
python
TbWuDaoKou.alibaba_price_promotion_activity_query
( self, page, outer_promotion_code, page_size, ou_code='' )
return self._top_request( "alibaba.price.promotion.activity.query", { "page": page, "outer_promotion_code": outer_promotion_code, "page_size": page_size, "ou_code": ou_code } )
Query Hema partner promotion period activity details. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=36865 :param page: page number :param outer_promotion_code: external promotion period code :param page_size: page size :param ou_code: TOB store/warehouse code
Query Hema partner promotion period activity details. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=36865
[ "Query", "Hema", "partner", "promotion", "period", "activity", "details", ".", "Documentation:https", ":", "//", "open", "-", "doc", ".", "dingtalk", ".", "com", "/", "docs", "/", "api", ".", "htm?apiId", "=", "36865" ]
def alibaba_price_promotion_activity_query( self, page, outer_promotion_code, page_size, ou_code='' ): """ Query Hema partner promotion period activity details. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=36865 :param page: page number :param outer_promotion_code: external promotion period code :param page_size: page size :param ou_code: TOB store/warehouse code """ return self._top_request( "alibaba.price.promotion.activity.query", { "page": page, "outer_promotion_code": outer_promotion_code, "page_size": page_size, "ou_code": ou_code } )
[ "def", "alibaba_price_promotion_activity_query", "(", "self", ",", "page", ",", "outer_promotion_code", ",", "page_size", ",", "ou_code", "=", "''", ")", ":", "return", "self", ".", "_top_request", "(", "\"alibaba.price.promotion.activity.query\"", ",", "{", "\"page\"", ":", "page", ",", "\"outer_promotion_code\"", ":", "outer_promotion_code", ",", "\"page_size\"", ":", "page_size", ",", "\"ou_code\"", ":", "ou_code", "}", ")" ]
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L63810-L63834
eoyilmaz/stalker
a35c041b79d953d00dc2a09cf8206956ca269bef
stalker/models/shot.py
python
Shot._validate_fps
(self, fps)
return float(fps)
validates the given fps value
validates the given fps value
[ "validates", "the", "given", "fps", "value" ]
def _validate_fps(self, fps): """validates the given fps value """ if fps is None: # fps = self.project.fps return None if not isinstance(fps, (int, float)): raise TypeError( '%s.fps should be a positive float or int, not %s' % ( self.__class__.__name__, fps.__class__.__name__ ) ) fps = float(fps) if fps <= 0: raise ValueError( '%s.fps should be a positive float or int, not %s' % (self.__class__.__name__, fps) ) return float(fps)
[ "def", "_validate_fps", "(", "self", ",", "fps", ")", ":", "if", "fps", "is", "None", ":", "# fps = self.project.fps", "return", "None", "if", "not", "isinstance", "(", "fps", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "'%s.fps should be a positive float or int, not %s'", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "fps", ".", "__class__", ".", "__name__", ")", ")", "fps", "=", "float", "(", "fps", ")", "if", "fps", "<=", "0", ":", "raise", "ValueError", "(", "'%s.fps should be a positive float or int, not %s'", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "fps", ")", ")", "return", "float", "(", "fps", ")" ]
https://github.com/eoyilmaz/stalker/blob/a35c041b79d953d00dc2a09cf8206956ca269bef/stalker/models/shot.py#L350-L371
ukdtom/ExportTools.bundle
49aba4292a2897f640162a833c2792480aa4f0b6
Contents/Libraries/Shared/xlsxwriter/worksheet.py
python
Worksheet.set_print_scale
(self, scale)
Set the scale factor for the printed page. Args: scale: Print scale. 10 <= scale <= 400. Returns: Nothing.
Set the scale factor for the printed page.
[ "Set", "the", "scale", "factor", "for", "the", "printed", "page", "." ]
def set_print_scale(self, scale): """ Set the scale factor for the printed page. Args: scale: Print scale. 10 <= scale <= 400. Returns: Nothing. """ # Confine the scale to Excel's range. if scale < 10 or scale > 400: warn("Print scale '%d' outside range: 10 <= scale <= 400" % scale) return # Turn off "fit to page" option when print scale is on. self.fit_page = 0 self.print_scale = int(scale) self.page_setup_changed = True
[ "def", "set_print_scale", "(", "self", ",", "scale", ")", ":", "# Confine the scale to Excel's range.", "if", "scale", "<", "10", "or", "scale", ">", "400", ":", "warn", "(", "\"Print scale '%d' outside range: 10 <= scale <= 400\"", "%", "scale", ")", "return", "# Turn off \"fit to page\" option when print scale is on.", "self", ".", "fit_page", "=", "0", "self", ".", "print_scale", "=", "int", "(", "scale", ")", "self", ".", "page_setup_changed", "=", "True" ]
https://github.com/ukdtom/ExportTools.bundle/blob/49aba4292a2897f640162a833c2792480aa4f0b6/Contents/Libraries/Shared/xlsxwriter/worksheet.py#L3481-L3501
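As the record shows, set_print_scale warns and returns on values outside 10-400, and otherwise disables fit-to-page before storing the scale. A minimal XlsxWriter usage sketch; the workbook name is illustrative:

import xlsxwriter

workbook = xlsxwriter.Workbook("report.xlsx")  # illustrative file name
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "hello")
worksheet.set_print_scale(75)    # print at 75%; implicitly turns fit-to-page off
# worksheet.set_print_scale(5)   # would only warn: outside 10 <= scale <= 400
workbook.close()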
pikpikcu/Pentest-Tools-Framework
cd6e6107764a809943dc4e073cde8149c1a2cd03
modules/xsser/core/curlcontrol.py
python
Curl.post
(self, cgi, params)
return self.__request(cgi)
Post a url.
Post a url.
[ "Post", "a", "url", "." ]
def post(self, cgi, params): """ Post a url. """ self.set_option(pycurl.POST, 1) self.set_option(pycurl.POSTFIELDS, params) return self.__request(cgi)
[ "def", "post", "(", "self", ",", "cgi", ",", "params", ")", ":", "self", ".", "set_option", "(", "pycurl", ".", "POST", ",", "1", ")", "self", ".", "set_option", "(", "pycurl", ".", "POSTFIELDS", ",", "params", ")", "return", "self", ".", "__request", "(", "cgi", ")" ]
https://github.com/pikpikcu/Pentest-Tools-Framework/blob/cd6e6107764a809943dc4e073cde8149c1a2cd03/modules/xsser/core/curlcontrol.py#L420-L426
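The wrapper above only flips pycurl.POST on and attaches the urlencoded body before delegating to its private request helper. A rough raw-pycurl equivalent, with the endpoint and form fields made up:

import pycurl
from io import BytesIO

buf = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, "http://example.com/form")  # made-up endpoint
c.setopt(pycurl.POST, 1)                         # same switch the record sets
c.setopt(pycurl.POSTFIELDS, "a=1&b=2")           # urlencoded POST body
c.setopt(pycurl.WRITEDATA, buf)                  # collect the response body
c.perform()
c.close()
print(buf.getvalue()[:80])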
EnterpriseDB/barman
487bad92edec72712531ead4746fad72bb310270
barman/clients/cloud_backup_delete.py
python
main
(args=None)
The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:]
The main script entry point
[ "The", "main", "script", "entry", "point" ]
def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): if not cloud_interface.test_connectivity(): raise SystemExit(1) # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) if not cloud_interface.bucket_exists: logging.error("Bucket %s does not exist", cloud_interface.bucket_name) raise SystemExit(1) catalog = CloudBackupCatalog( cloud_interface=cloud_interface, server_name=config.server_name ) # Call catalog.get_backup_list now so we know we can read the whole catalog # (the results are cached so this does not result in extra calls to cloud # storage) catalog.get_backup_list() if len(catalog.unreadable_backups) > 0: logging.error( "Cannot read the following backups: %s\n" "Unsafe to proceed with deletion due to failure reading backup catalog" % catalog.unreadable_backups ) raise SystemExit(1) if config.backup_id: # Because we only care about one backup, skip the annotation cache # because it is only helpful when dealing with multiple backups if catalog.should_keep_backup(config.backup_id, use_cache=False): logging.error( "Skipping delete of backup %s for server %s " "as it has a current keep request. If you really " "want to delete this backup please remove the keep " "and try again.", config.backup_id, config.server_name, ) raise SystemExit(1) _delete_backup( cloud_interface, catalog, config.backup_id, config.dry_run ) elif config.retention_policy: retention_policy = RetentionPolicyFactory.create( "retention_policy", config.retention_policy, server_name=config.server_name, catalog=catalog, ) # Sort to ensure that we delete the backups in ascending order, that is # from oldest to newest. This ensures that the relevant WALs will be cleaned # up after each backup is deleted. backups_to_delete = sorted( [ backup_id for backup_id, status in retention_policy.report().items() if status == "OBSOLETE" ] ) for backup_id in backups_to_delete: _delete_backup( cloud_interface, catalog, backup_id, config.dry_run, skip_wal_cleanup_if_standalone=False, ) except Exception as exc: logging.error("Barman cloud backup delete exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise SystemExit(1)
[ "def", "main", "(", "args", "=", "None", ")", ":", "config", "=", "parse_arguments", "(", "args", ")", "configure_logging", "(", "config", ")", "try", ":", "cloud_interface", "=", "get_cloud_interface", "(", "config", ")", "with", "closing", "(", "cloud_interface", ")", ":", "if", "not", "cloud_interface", ".", "test_connectivity", "(", ")", ":", "raise", "SystemExit", "(", "1", ")", "# If test is requested, just exit after connectivity test", "elif", "config", ".", "test", ":", "raise", "SystemExit", "(", "0", ")", "if", "not", "cloud_interface", ".", "bucket_exists", ":", "logging", ".", "error", "(", "\"Bucket %s does not exist\"", ",", "cloud_interface", ".", "bucket_name", ")", "raise", "SystemExit", "(", "1", ")", "catalog", "=", "CloudBackupCatalog", "(", "cloud_interface", "=", "cloud_interface", ",", "server_name", "=", "config", ".", "server_name", ")", "# Call catalog.get_backup_list now so we know we can read the whole catalog", "# (the results are cached so this does not result in extra calls to cloud", "# storage)", "catalog", ".", "get_backup_list", "(", ")", "if", "len", "(", "catalog", ".", "unreadable_backups", ")", ">", "0", ":", "logging", ".", "error", "(", "\"Cannot read the following backups: %s\\n\"", "\"Unsafe to proceed with deletion due to failure reading backup catalog\"", "%", "catalog", ".", "unreadable_backups", ")", "raise", "SystemExit", "(", "1", ")", "if", "config", ".", "backup_id", ":", "# Because we only care about one backup, skip the annotation cache", "# because it is only helpful when dealing with multiple backups", "if", "catalog", ".", "should_keep_backup", "(", "config", ".", "backup_id", ",", "use_cache", "=", "False", ")", ":", "logging", ".", "error", "(", "\"Skipping delete of backup %s for server %s \"", "\"as it has a current keep request. If you really \"", "\"want to delete this backup please remove the keep \"", "\"and try again.\"", ",", "config", ".", "backup_id", ",", "config", ".", "server_name", ",", ")", "raise", "SystemExit", "(", "1", ")", "_delete_backup", "(", "cloud_interface", ",", "catalog", ",", "config", ".", "backup_id", ",", "config", ".", "dry_run", ")", "elif", "config", ".", "retention_policy", ":", "retention_policy", "=", "RetentionPolicyFactory", ".", "create", "(", "\"retention_policy\"", ",", "config", ".", "retention_policy", ",", "server_name", "=", "config", ".", "server_name", ",", "catalog", "=", "catalog", ",", ")", "# Sort to ensure that we delete the backups in ascending order, that is", "# from oldest to newest. This ensures that the relevant WALs will be cleaned", "# up after each backup is deleted.", "backups_to_delete", "=", "sorted", "(", "[", "backup_id", "for", "backup_id", ",", "status", "in", "retention_policy", ".", "report", "(", ")", ".", "items", "(", ")", "if", "status", "==", "\"OBSOLETE\"", "]", ")", "for", "backup_id", "in", "backups_to_delete", ":", "_delete_backup", "(", "cloud_interface", ",", "catalog", ",", "backup_id", ",", "config", ".", "dry_run", ",", "skip_wal_cleanup_if_standalone", "=", "False", ",", ")", "except", "Exception", "as", "exc", ":", "logging", ".", "error", "(", "\"Barman cloud backup delete exception: %s\"", ",", "force_str", "(", "exc", ")", ")", "logging", ".", "debug", "(", "\"Exception details:\"", ",", "exc_info", "=", "exc", ")", "raise", "SystemExit", "(", "1", ")" ]
https://github.com/EnterpriseDB/barman/blob/487bad92edec72712531ead4746fad72bb310270/barman/clients/cloud_backup_delete.py#L196-L279
Nuitka/Nuitka
39262276993757fa4e299f497654065600453fc9
nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/Python.py
python
Value.write
(self, built_value)
Set the value of the node.
Set the value of the node.
[ "Set", "the", "value", "of", "the", "node", "." ]
def write(self, built_value): """Set the value of the node.""" self.built_value = built_value
[ "def", "write", "(", "self", ",", "built_value", ")", ":", "self", ".", "built_value", "=", "built_value" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/Python.py#L80-L82
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_vendor/urllib3/util/proxy.py
python
connection_requires_http_tunnel
( proxy_url=None, proxy_config=None, destination_scheme=None )
return True
Returns True if the connection requires an HTTP CONNECT through the proxy. :param URL proxy_url: URL of the proxy. :param ProxyConfig proxy_config: Proxy configuration from poolmanager.py :param str destination_scheme: The scheme of the destination. (i.e https, http, etc)
Returns True if the connection requires an HTTP CONNECT through the proxy.
[ "Returns", "True", "if", "the", "connection", "requires", "an", "HTTP", "CONNECT", "through", "the", "proxy", "." ]
def connection_requires_http_tunnel( proxy_url=None, proxy_config=None, destination_scheme=None ): """ Returns True if the connection requires an HTTP CONNECT through the proxy. :param URL proxy_url: URL of the proxy. :param ProxyConfig proxy_config: Proxy configuration from poolmanager.py :param str destination_scheme: The scheme of the destination. (i.e https, http, etc) """ # If we're not using a proxy, no way to use a tunnel. if proxy_url is None: return False # HTTP destinations never require tunneling, we always forward. if destination_scheme == "http": return False # Support for forwarding with HTTPS proxies and HTTPS destinations. if ( proxy_url.scheme == "https" and proxy_config and proxy_config.use_forwarding_for_https ): return False # Otherwise always use a tunnel. return True
[ "def", "connection_requires_http_tunnel", "(", "proxy_url", "=", "None", ",", "proxy_config", "=", "None", ",", "destination_scheme", "=", "None", ")", ":", "# If we're not using a proxy, no way to use a tunnel.", "if", "proxy_url", "is", "None", ":", "return", "False", "# HTTP destinations never require tunneling, we always forward.", "if", "destination_scheme", "==", "\"http\"", ":", "return", "False", "# Support for forwarding with HTTPS proxies and HTTPS destinations.", "if", "(", "proxy_url", ".", "scheme", "==", "\"https\"", "and", "proxy_config", "and", "proxy_config", ".", "use_forwarding_for_https", ")", ":", "return", "False", "# Otherwise always use a tunnel.", "return", "True" ]
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/urllib3/util/proxy.py#L4-L34
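The decision table above is small: no proxy or an http destination forwards, an https proxy configured with use_forwarding_for_https forwards, and everything else tunnels via CONNECT. A sketch exercising it, assuming urllib3 1.26.x, where this vendored helper lives under urllib3.util.proxy:

from urllib3.util.url import parse_url
from urllib3.util.proxy import connection_requires_http_tunnel

proxy = parse_url("http://proxy.local:3128")                 # placeholder proxy URL
print(connection_requires_http_tunnel(proxy, None, "http"))  # False: http is always forwarded
print(connection_requires_http_tunnel(proxy, None, "https")) # True: https via an http proxy tunnels
print(connection_requires_http_tunnel(None, None, "https"))  # False: no proxy, no tunnel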
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/email/message.py
python
Message.set_param
(self, param, value, header='Content-Type', requote=True, charset=None, language='')
Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. If header is Content-Type and has not yet been defined for this message, it will be set to "text/plain" and the new parameter and value will be appended as per RFC 2045. An alternate header can be specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings.
Set a parameter in the Content-Type header.
[ "Set", "a", "parameter", "in", "the", "Content", "-", "Type", "header", "." ]
def set_param(self, param, value, header='Content-Type', requote=True, charset=None, language=''): """Set a parameter in the Content-Type header. If the parameter already exists in the header, its value will be replaced with the new value. If header is Content-Type and has not yet been defined for this message, it will be set to "text/plain" and the new parameter and value will be appended as per RFC 2045. An alternate header can be specified in the header argument, and all parameters will be quoted as necessary unless requote is False. If charset is specified, the parameter will be encoded according to RFC 2231. Optional language specifies the RFC 2231 language, defaulting to the empty string. Both charset and language should be strings. """ if not isinstance(value, tuple) and charset: value = (charset, language, value) if header not in self and header.lower() == 'content-type': ctype = 'text/plain' else: ctype = self.get(header) if not self.get_param(param, header=header): if not ctype: ctype = _formatparam(param, value, requote) else: ctype = SEMISPACE.join( [ctype, _formatparam(param, value, requote)]) else: ctype = '' for old_param, old_value in self.get_params(header=header, unquote=requote): append_param = '' if old_param.lower() == param.lower(): append_param = _formatparam(param, value, requote) else: append_param = _formatparam(old_param, old_value, requote) if not ctype: ctype = append_param else: ctype = SEMISPACE.join([ctype, append_param]) if ctype != self.get(header): del self[header] self[header] = ctype
[ "def", "set_param", "(", "self", ",", "param", ",", "value", ",", "header", "=", "'Content-Type'", ",", "requote", "=", "True", ",", "charset", "=", "None", ",", "language", "=", "''", ")", ":", "if", "not", "isinstance", "(", "value", ",", "tuple", ")", "and", "charset", ":", "value", "=", "(", "charset", ",", "language", ",", "value", ")", "if", "header", "not", "in", "self", "and", "header", ".", "lower", "(", ")", "==", "'content-type'", ":", "ctype", "=", "'text/plain'", "else", ":", "ctype", "=", "self", ".", "get", "(", "header", ")", "if", "not", "self", ".", "get_param", "(", "param", ",", "header", "=", "header", ")", ":", "if", "not", "ctype", ":", "ctype", "=", "_formatparam", "(", "param", ",", "value", ",", "requote", ")", "else", ":", "ctype", "=", "SEMISPACE", ".", "join", "(", "[", "ctype", ",", "_formatparam", "(", "param", ",", "value", ",", "requote", ")", "]", ")", "else", ":", "ctype", "=", "''", "for", "old_param", ",", "old_value", "in", "self", ".", "get_params", "(", "header", "=", "header", ",", "unquote", "=", "requote", ")", ":", "append_param", "=", "''", "if", "old_param", ".", "lower", "(", ")", "==", "param", ".", "lower", "(", ")", ":", "append_param", "=", "_formatparam", "(", "param", ",", "value", ",", "requote", ")", "else", ":", "append_param", "=", "_formatparam", "(", "old_param", ",", "old_value", ",", "requote", ")", "if", "not", "ctype", ":", "ctype", "=", "append_param", "else", ":", "ctype", "=", "SEMISPACE", ".", "join", "(", "[", "ctype", ",", "append_param", "]", ")", "if", "ctype", "!=", "self", ".", "get", "(", "header", ")", ":", "del", "self", "[", "header", "]", "self", "[", "header", "]", "=", "ctype" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/email/message.py#L571-L617
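set_param either appends the parameter to the named header (defaulting an absent Content-Type to text/plain) or rewrites the existing parameter in place. A short sketch of that behavior through the stdlib email package:

from email.message import Message

msg = Message()
msg.set_param("charset", "utf-8")       # no Content-Type yet: becomes text/plain; charset="utf-8"
msg.set_param("charset", "iso-8859-1")  # parameter exists: its value is replaced in place
print(msg["Content-Type"])              # text/plain; charset="iso-8859-1"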
mathics/Mathics
318e06dea8f1c70758a50cb2f95c9900150e3a68
mathics/builtin/system.py
python
Share.apply_1
(self, symbol, evaluation)
return Integer0
Share[symbol_Symbol]
Share[symbol_Symbol]
[ "Share", "[", "symbol_Symbol", "]" ]
def apply_1(self, symbol, evaluation) -> Integer: """Share[symbol_Symbol]""" # TODO: implement a routine that swaps all the definitions, # collecting repeated symbols and expressions, and then # replaces them with references. # Return the amount of memory recovered. return Integer0
[ "def", "apply_1", "(", "self", ",", "symbol", ",", "evaluation", ")", "->", "Integer", ":", "# TODO: implement a routine that swaps all the definitions,", "# collecting repeated symbols and expressions, and then", "# replaces them with references.", "# Return the amount of memory recovered.", "return", "Integer0" ]
https://github.com/mathics/Mathics/blob/318e06dea8f1c70758a50cb2f95c9900150e3a68/mathics/builtin/system.py#L608-L614
selfteaching/selfteaching-python-camp
9982ee964b984595e7d664b07c389cddaf158f1e
19100205/Ceasar1978/pip-19.0.3/src/pip/_vendor/html5lib/treebuilders/base.py
python
Node.__init__
(self, name)
Creates a Node :arg name: The tag name associated with the node
Creates a Node
[ "Creates", "a", "Node" ]
def __init__(self, name): """Creates a Node :arg name: The tag name associated with the node """ # The tag name associated with the node self.name = name # The parent of the current node (or None for the document node) self.parent = None # The value of the current node (applies to text nodes and comments) self.value = None # A dict holding name -> value pairs for attributes of the node self.attributes = {} # A list of child nodes of the current node. This must include all # elements but not necessarily other node types. self.childNodes = [] # A list of miscellaneous flags that can be set on the node. self._flags = []
[ "def", "__init__", "(", "self", ",", "name", ")", ":", "# The tag name associated with the node", "self", ".", "name", "=", "name", "# The parent of the current node (or None for the document node)", "self", ".", "parent", "=", "None", "# The value of the current node (applies to text nodes and comments)", "self", ".", "value", "=", "None", "# A dict holding name -> value pairs for attributes of the node", "self", ".", "attributes", "=", "{", "}", "# A list of child nodes of the current node. This must include all", "# elements but not necessarily other node types.", "self", ".", "childNodes", "=", "[", "]", "# A list of miscellaneous flags that can be set on the node.", "self", ".", "_flags", "=", "[", "]" ]
https://github.com/selfteaching/selfteaching-python-camp/blob/9982ee964b984595e7d664b07c389cddaf158f1e/19100205/Ceasar1978/pip-19.0.3/src/pip/_vendor/html5lib/treebuilders/base.py#L25-L43
kanzure/nanoengineer
874e4c9f8a9190f093625b267f9767e19f82e6c4
cad/src/graphics/drawing/GLPrimitiveBuffer.py
python
Hunk.changedRange
(self, chgLowID, chgHighID)
return
Record a range of data changes, for later flushing. chgLowID and chgHighID are primitive IDs, possibly spanning many hunks. The high end is the index of the one *after* the end of the range, as usual in Python.
Record a range of data changes, for later flushing.
[ "Record", "a", "range", "of", "data", "changes", "for", "later", "flushing", "." ]
def changedRange(self, chgLowID, chgHighID): """ Record a range of data changes, for later flushing. chgLowID and chgHighID are primitive IDs, possibly spanning many hunks. The high end is the index of the one *after* the end of the range, as usual in Python. """ (lowHunk, lowIndex) = decodePrimID(chgLowID) (highHunk, highIndex) = decodePrimID(chgHighID) if self.hunkNumber < lowHunk or self.hunkNumber > highHunk: return # This hunk is not in the hunk range. if lowHunk < self.hunkNumber: self.low = 0 else: self.low = min(self.low, lowIndex) # Maybe extend the range. pass if highHunk > self.hunkNumber: self.high = HUNK_SIZE else: self.high = max(self.high, highIndex) # Maybe extend the range. pass return
[ "def", "changedRange", "(", "self", ",", "chgLowID", ",", "chgHighID", ")", ":", "(", "lowHunk", ",", "lowIndex", ")", "=", "decodePrimID", "(", "chgLowID", ")", "(", "highHunk", ",", "highIndex", ")", "=", "decodePrimID", "(", "chgHighID", ")", "if", "self", ".", "hunkNumber", "<", "lowHunk", "or", "self", ".", "hunkNumber", ">", "highHunk", ":", "return", "# This hunk is not in the hunk range.", "if", "lowHunk", "<", "self", ".", "hunkNumber", ":", "self", ".", "low", "=", "0", "else", ":", "self", ".", "low", "=", "min", "(", "self", ".", "low", ",", "lowIndex", ")", "# Maybe extend the range.", "pass", "if", "highHunk", ">", "self", ".", "hunkNumber", ":", "self", ".", "high", "=", "HUNK_SIZE", "else", ":", "self", ".", "high", "=", "max", "(", "self", ".", "high", ",", "highIndex", ")", "# Maybe extend the range.", "pass", "return" ]
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/graphics/drawing/GLPrimitiveBuffer.py#L637-L664
yxgeee/MMT
057e1ea5d3054c9d7e5fa72c727298d8e4c5f668
mmt/utils/data/__init__.py
python
IterLoader.__len__
(self)
return len(self.loader)
[]
def __len__(self): if (self.length is not None): return self.length return len(self.loader)
[ "def", "__len__", "(", "self", ")", ":", "if", "(", "self", ".", "length", "is", "not", "None", ")", ":", "return", "self", ".", "length", "return", "len", "(", "self", ".", "loader", ")" ]
https://github.com/yxgeee/MMT/blob/057e1ea5d3054c9d7e5fa72c727298d8e4c5f668/mmt/utils/data/__init__.py#L12-L15
wandb/client
3963364d8112b7dedb928fa423b6878ea1b467d9
wandb/vendor/watchdog/observers/api.py
python
BaseObserver.schedule
(self, event_handler, path, recursive=False)
return watch
Schedules watching a path and calls appropriate methods specified in the given event handler in response to file system events. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param path: Directory path that will be monitored. :type path: ``str`` :param recursive: ``True`` if events will be emitted for sub-directories traversed recursively; ``False`` otherwise. :type recursive: ``bool`` :return: An :class:`ObservedWatch` object instance representing a watch.
Schedules watching a path and calls appropriate methods specified in the given event handler in response to file system events.
[ "Schedules", "watching", "a", "path", "and", "calls", "appropriate", "methods", "specified", "in", "the", "given", "event", "handler", "in", "response", "to", "file", "system", "events", "." ]
def schedule(self, event_handler, path, recursive=False): """ Schedules watching a path and calls appropriate methods specified in the given event handler in response to file system events. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param path: Directory path that will be monitored. :type path: ``str`` :param recursive: ``True`` if events will be emitted for sub-directories traversed recursively; ``False`` otherwise. :type recursive: ``bool`` :return: An :class:`ObservedWatch` object instance representing a watch. """ with self._lock: watch = ObservedWatch(path, recursive) self._add_handler_for_watch(event_handler, watch) # If we don't have an emitter for this watch already, create it. if self._emitter_for_watch.get(watch) is None: emitter = self._emitter_class(event_queue=self.event_queue, watch=watch, timeout=self.timeout) self._add_emitter(emitter) if self.is_alive(): emitter.start() self._watches.add(watch) return watch
[ "def", "schedule", "(", "self", ",", "event_handler", ",", "path", ",", "recursive", "=", "False", ")", ":", "with", "self", ".", "_lock", ":", "watch", "=", "ObservedWatch", "(", "path", ",", "recursive", ")", "self", ".", "_add_handler_for_watch", "(", "event_handler", ",", "watch", ")", "# If we don't have an emitter for this watch already, create it.", "if", "self", ".", "_emitter_for_watch", ".", "get", "(", "watch", ")", "is", "None", ":", "emitter", "=", "self", ".", "_emitter_class", "(", "event_queue", "=", "self", ".", "event_queue", ",", "watch", "=", "watch", ",", "timeout", "=", "self", ".", "timeout", ")", "self", ".", "_add_emitter", "(", "emitter", ")", "if", "self", ".", "is_alive", "(", ")", ":", "emitter", ".", "start", "(", ")", "self", ".", "_watches", ".", "add", "(", "watch", ")", "return", "watch" ]
https://github.com/wandb/client/blob/3963364d8112b7dedb928fa423b6878ea1b467d9/wandb/vendor/watchdog/observers/api.py#L258-L295
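schedule registers the handler on an ObservedWatch and lazily creates one emitter per watched path, starting it immediately if the observer is already running. A minimal sketch against the upstream watchdog package this module is vendored from; the watched path "." is illustrative:

import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintHandler(FileSystemEventHandler):
    def on_modified(self, event):
        print("modified:", event.src_path)

observer = Observer()
watch = observer.schedule(PrintHandler(), ".", recursive=True)  # returns the ObservedWatch
observer.start()
try:
    time.sleep(5)  # touch a file under '.' during this window to see events
finally:
    observer.stop()
    observer.join()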
dragonfly/dragonfly
a579b5eadf452e23b07d4caf27b402703b0012b7
dragonfly/exd/domains.py
python
DiscreteEuclideanDomain.get_dim
(self)
return self.dim
Return the dimensions.
Return the dimensions.
[ "Return", "the", "dimensions", "." ]
def get_dim(self): """ Return the dimensions. """ return self.dim
[ "def", "get_dim", "(", "self", ")", ":", "return", "self", ".", "dim" ]
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/exd/domains.py#L234-L236
IBM/lale
b4d6829c143a4735b06083a0e6c70d2cca244162
lale/lib/aif360/util.py
python
_SymmetricDisparateImpact.__call__
(self, estimator, X, y)
return self.score_estimator(estimator, X, y)
[]
def __call__(self, estimator, X, y): return self.score_estimator(estimator, X, y)
[ "def", "__call__", "(", "self", ",", "estimator", ",", "X", ",", "y", ")", ":", "return", "self", ".", "score_estimator", "(", "estimator", ",", "X", ",", "y", ")" ]
https://github.com/IBM/lale/blob/b4d6829c143a4735b06083a0e6c70d2cca244162/lale/lib/aif360/util.py#L844-L845
awslabs/sockeye
ec2d13f7beb42d8c4f389dba0172250dc9154d5a
sockeye/utils.py
python
check_condition
(condition: bool, error_message: str)
Check the condition and if it is not met, raise a SockeyeError with the given error message, similar to assertions. :param condition: Condition to check. :param error_message: Error message to show to the user.
Check the condition and if it is not met, raise a SockeyeError with the given error message, similar to assertions.
[ "Check", "the", "condition", "and", "if", "it", "is", "not", "met", "raise", "a", "SockeyeError", "with", "the", "given", "error", "message", "similar", "to", "assertions", "." ]
def check_condition(condition: bool, error_message: str): """ Check the condition and if it is not met, raise a SockeyeError with the given error message, similar to assertions. :param condition: Condition to check. :param error_message: Error message to show to the user. """ if not condition: raise SockeyeError(error_message)
[ "def", "check_condition", "(", "condition", ":", "bool", ",", "error_message", ":", "str", ")", ":", "if", "not", "condition", ":", "raise", "SockeyeError", "(", "error_message", ")" ]
https://github.com/awslabs/sockeye/blob/ec2d13f7beb42d8c4f389dba0172250dc9154d5a/sockeye/utils.py#L144-L153
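check_condition is an assert that raises the package's own exception type instead of AssertionError, so callers can catch configuration problems uniformly. A usage sketch, assuming SockeyeError is importable from the same module, as the raise in the record suggests:

from sockeye.utils import SockeyeError, check_condition

batch_size = 0  # made-up invalid setting
try:
    check_condition(batch_size > 0, "batch_size must be positive, got %d" % batch_size)
except SockeyeError as e:
    print("invalid configuration:", e)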
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/datetime.py
python
date.ctime
(self)
return "%s %s %2d 00:00:00 %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._year)
Return ctime() style string.
Return ctime() style string.
[ "Return", "ctime", "()", "style", "string", "." ]
def ctime(self): "Return ctime() style string." weekday = self.toordinal() % 7 or 7 return "%s %s %2d 00:00:00 %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._year)
[ "def", "ctime", "(", "self", ")", ":", "weekday", "=", "self", ".", "toordinal", "(", ")", "%", "7", "or", "7", "return", "\"%s %s %2d 00:00:00 %04d\"", "%", "(", "_DAYNAMES", "[", "weekday", "]", ",", "_MONTHNAMES", "[", "self", ".", "_month", "]", ",", "self", ".", "_day", ",", "self", ".", "_year", ")" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/datetime.py#L733-L739
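The `% 7 or 7` in the record maps Sunday's ordinal remainder of 0 back to 7 so the 1-based _DAYNAMES table can be indexed, and the %2d pads single-digit days with a space. A quick check with the stdlib:

from datetime import date

d = date(2024, 3, 5)           # a Tuesday
print(d.ctime())               # 'Tue Mar  5 00:00:00 2024' -- note the space-padded day
print(d.toordinal() % 7 or 7)  # 2, i.e. Tuesday in the 1=Monday .. 7=Sunday convention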
makinacorpus/landez
6e5c71ded6071158e7943df204cd7bd1ed623a30
landez/sources.py
python
TileDownloader.tile
(self, z, x, y)
Download the specified tile from `tiles_url`
Download the specified tile from `tiles_url`
[ "Download", "the", "specified", "tile", "from", "tiles_url" ]
def tile(self, z, x, y): """ Download the specified tile from `tiles_url` """ logger.debug(_("Download tile %s") % ((z, x, y),)) # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... ) size = self.tilesize s = self.tiles_subdomains[(x + y) % len(self.tiles_subdomains)] try: url = self.tiles_url.format(**locals()) except KeyError as e: raise DownloadError(_("Unknown keyword %s in URL") % e) logger.debug(_("Retrieve tile at %s") % url) r = DOWNLOAD_RETRIES sleeptime = 1 while r > 0: try: request = requests.get(url, headers=self.headers) if request.status_code == 200: return request.content raise DownloadError(_("Status code : %s, url : %s") % (request.status_code, url)) except requests.exceptions.ConnectionError as e: logger.debug(_("Download error, retry (%s left). (%s)") % (r, e)) r -= 1 time.sleep(sleeptime) # progressively sleep longer to wait for this tile if (sleeptime <= 10) and (r % 2 == 0): sleeptime += 1 # increase wait raise DownloadError(_("Cannot download URL %s") % url)
[ "def", "tile", "(", "self", ",", "z", ",", "x", ",", "y", ")", ":", "logger", ".", "debug", "(", "_", "(", "\"Download tile %s\"", ")", "%", "(", "(", "z", ",", "x", ",", "y", ")", ",", ")", ")", "# Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )", "size", "=", "self", ".", "tilesize", "s", "=", "self", ".", "tiles_subdomains", "[", "(", "x", "+", "y", ")", "%", "len", "(", "self", ".", "tiles_subdomains", ")", "]", "try", ":", "url", "=", "self", ".", "tiles_url", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "except", "KeyError", "as", "e", ":", "raise", "DownloadError", "(", "_", "(", "\"Unknown keyword %s in URL\"", ")", "%", "e", ")", "logger", ".", "debug", "(", "_", "(", "\"Retrieve tile at %s\"", ")", "%", "url", ")", "r", "=", "DOWNLOAD_RETRIES", "sleeptime", "=", "1", "while", "r", ">", "0", ":", "try", ":", "request", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "headers", ")", "if", "request", ".", "status_code", "==", "200", ":", "return", "request", ".", "content", "raise", "DownloadError", "(", "_", "(", "\"Status code : %s, url : %s\"", ")", "%", "(", "request", ".", "status_code", ",", "url", ")", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "logger", ".", "debug", "(", "_", "(", "\"Download error, retry (%s left). (%s)\"", ")", "%", "(", "r", ",", "e", ")", ")", "r", "-=", "1", "time", ".", "sleep", "(", "sleeptime", ")", "# progressively sleep longer to wait for this tile", "if", "(", "sleeptime", "<=", "10", ")", "and", "(", "r", "%", "2", "==", "0", ")", ":", "sleeptime", "+=", "1", "# increase wait", "raise", "DownloadError", "(", "_", "(", "\"Cannot download URL %s\"", ")", "%", "url", ")" ]
https://github.com/makinacorpus/landez/blob/6e5c71ded6071158e7943df204cd7bd1ed623a30/landez/sources.py#L163-L192
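The loop above is bounded retry with a slowly growing sleep: only connection errors are retried, non-200 statuses fail fast, and the sleep grows by one second on every other remaining attempt, capped around ten. The bare pattern extracted into a standalone helper; the function name and retry count are made up:

import time
import requests

def fetch_with_retries(url, retries=5):
    sleeptime = 1
    while retries > 0:
        try:
            response = requests.get(url)
            if response.status_code == 200:
                return response.content
            raise RuntimeError("status %s for %s" % (response.status_code, url))  # fail fast
        except requests.exceptions.ConnectionError:
            retries -= 1
            time.sleep(sleeptime)
            if sleeptime <= 10 and retries % 2 == 0:
                sleeptime += 1  # back off a little more after every second failure
    raise RuntimeError("cannot download %s" % url)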
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/providers/azure/azure_blob_storage.py
python
AzureBlobStorageService.ListTopLevelSubfolders
(self, bucket)
return list(unique_folders)
Lists the top level folders (not files) in a bucket. Each listed item is a full file name, e.g. "supplier/supplier.csv", so just the high level folder name is extracted, and repetitions are eliminated when there are multiple files in a folder. Args: bucket: Name of the bucket to list the top level subfolders of. Returns: A list of top level subfolder names. Can be empty if there are no folders.
Lists the top level folders (not files) in a bucket.
[ "Lists", "the", "top", "level", "folders", "(", "not", "files", ")", "in", "a", "bucket", "." ]
def ListTopLevelSubfolders(self, bucket): """Lists the top level folders (not files) in a bucket. Each listed item is a full file name, e.g. "supplier/supplier.csv", so just the high level folder name is extracted, and repetitions are eliminated when there are multiple files in a folder. Args: bucket: Name of the bucket to list the top level subfolders of. Returns: A list of top level subfolder names. Can be empty if there are no folders. """ unique_folders = set([ obj.split('/')[0].strip() for obj in self.List(bucket) if obj and '/' in obj ]) return list(unique_folders)
[ "def", "ListTopLevelSubfolders", "(", "self", ",", "bucket", ")", ":", "unique_folders", "=", "set", "(", "[", "obj", ".", "split", "(", "'/'", ")", "[", "0", "]", ".", "strip", "(", ")", "for", "obj", "in", "self", ".", "List", "(", "bucket", ")", "if", "obj", "and", "'/'", "in", "obj", "]", ")", "return", "list", "(", "unique_folders", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/providers/azure/azure_blob_storage.py#L197-L215
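With the containment test corrected (Python strings have no .contains method; membership is spelled '/' in obj), the method is a set reduction over the flat object listing: keep the distinct first path segment of every key that has one. The same reduction over a made-up listing:

objects = ["supplier/supplier.csv", "supplier/extra.csv", "orders/2020.csv", "README"]
top_level = sorted({obj.split("/")[0].strip() for obj in objects if obj and "/" in obj})
print(top_level)  # ['orders', 'supplier'] -- 'README' has no '/' and is skipped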
GaoQ1/rasa_nlu_gq
aea2d037220f022b2773d0852cbcfbad582e8e25
rasa_nlu_gao/models/bert/modeling.py
python
reshape_to_matrix
(input_tensor)
return output_tensor
Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).
Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).
[ "Reshapes", "a", ">", "=", "rank", "2", "tensor", "to", "a", "rank", "2", "tensor", "(", "i", ".", "e", ".", "a", "matrix", ")", "." ]
def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor
[ "def", "reshape_to_matrix", "(", "input_tensor", ")", ":", "ndims", "=", "input_tensor", ".", "shape", ".", "ndims", "if", "ndims", "<", "2", ":", "raise", "ValueError", "(", "\"Input tensor must have at least rank 2. Shape = %s\"", "%", "(", "input_tensor", ".", "shape", ")", ")", "if", "ndims", "==", "2", ":", "return", "input_tensor", "width", "=", "input_tensor", ".", "shape", "[", "-", "1", "]", "output_tensor", "=", "tf", ".", "reshape", "(", "input_tensor", ",", "[", "-", "1", ",", "width", "]", ")", "return", "output_tensor" ]
https://github.com/GaoQ1/rasa_nlu_gq/blob/aea2d037220f022b2773d0852cbcfbad582e8e25/rasa_nlu_gao/models/bert/modeling.py#L942-L953
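reshape_to_matrix folds every leading axis into one while keeping the trailing feature width, the shape BERT's dense layers expect. A small check; TensorFlow 2 runs it eagerly while the record's code targets TF 1 graph mode, but the call is identical, and the shapes here are illustrative:

import tensorflow as tf

x = tf.zeros([8, 128, 768])        # [batch, seq_len, hidden]
width = x.shape[-1]
flat = tf.reshape(x, [-1, width])  # -1 absorbs batch * seq_len = 1024
print(flat.shape)                  # (1024, 768)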
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/core/strings.py
python
str_encode
(arr, encoding, errors="strict")
return _na_map(f, arr)
Encode character string in the Series/Index using indicated encoding. Equivalent to :meth:`str.encode`. Parameters ---------- encoding : str errors : str, optional Returns ------- encoded : Series/Index of objects
Encode character string in the Series/Index using indicated encoding. Equivalent to :meth:`str.encode`.
[ "Encode", "character", "string", "in", "the", "Series", "/", "Index", "using", "indicated", "encoding", ".", "Equivalent", "to", ":", "meth", ":", "str", ".", "encode", "." ]
def str_encode(arr, encoding, errors="strict"): """ Encode character string in the Series/Index using indicated encoding. Equivalent to :meth:`str.encode`. Parameters ---------- encoding : str errors : str, optional Returns ------- encoded : Series/Index of objects """ if encoding in _cpython_optimized_encoders: # CPython optimized implementation f = lambda x: x.encode(encoding, errors) else: encoder = codecs.getencoder(encoding) f = lambda x: encoder(x, errors)[0] return _na_map(f, arr)
[ "def", "str_encode", "(", "arr", ",", "encoding", ",", "errors", "=", "\"strict\"", ")", ":", "if", "encoding", "in", "_cpython_optimized_encoders", ":", "# CPython optimized implementation", "f", "=", "lambda", "x", ":", "x", ".", "encode", "(", "encoding", ",", "errors", ")", "else", ":", "encoder", "=", "codecs", ".", "getencoder", "(", "encoding", ")", "f", "=", "lambda", "x", ":", "encoder", "(", "x", ",", "errors", ")", "[", "0", "]", "return", "_na_map", "(", "f", ",", "arr", ")" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/core/strings.py#L1713-L1733
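Through the public API this element-wise encoder is reached as Series.str.encode, and the _na_map wrapper lets missing values pass through untouched. A short sketch with made-up data:

import pandas as pd

s = pd.Series(["café", "naïve", None])
encoded = s.str.encode("utf-8")  # None propagates as NaN rather than raising
print(encoded.tolist())          # [b'caf\xc3\xa9', b'na\xc3\xafve', nan]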
scrapinghub/frontera
84f9e1034d2868447db88e865596c0fbb32e70f6
frontera/core/manager.py
python
ComponentsPipelineMixin.middlewares
(self)
return self._middlewares
A list of :class:`Middleware <frontera.core.components.Middleware>` objects to be used by the frontier. \ Can be defined with :setting:`MIDDLEWARES` setting.
A list of :class:`Middleware <frontera.core.components.Middleware>` objects to be used by the frontier. \ Can be defined with :setting:`MIDDLEWARES` setting.
[ "A", "list", "of", ":", "class", ":", "Middleware", "<frontera", ".", "core", ".", "components", ".", "Middleware", ">", "objects", "to", "be", "used", "by", "the", "frontier", ".", "\\", "Can", "be", "defined", "with", ":", "setting", ":", "MIDDLEWARES", "setting", "." ]
def middlewares(self): """ A list of :class:`Middleware <frontera.core.components.Middleware>` objects to be used by the frontier. \ Can be defined with :setting:`MIDDLEWARES` setting. """ return self._middlewares
[ "def", "middlewares", "(", "self", ")", ":", "return", "self", ".", "_middlewares" ]
https://github.com/scrapinghub/frontera/blob/84f9e1034d2868447db88e865596c0fbb32e70f6/frontera/core/manager.py#L97-L102
stratosphereips/Manati
20e55d49edf00f8503807c62397d02a0dad9ddff
manati/api_manager/core/modules_manager.py
python
ModulesManager.bulk_labeling_by_whois_relation
(username, analysis_session_id, domain,verdict)
[]
def bulk_labeling_by_whois_relation(username, analysis_session_id, domain,verdict): queue = django_rq.get_queue('high') queue.enqueue(__bulk_labeling_by_whois_relation_aux__, username, analysis_session_id, domain,verdict)
[ "def", "bulk_labeling_by_whois_relation", "(", "username", ",", "analysis_session_id", ",", "domain", ",", "verdict", ")", ":", "queue", "=", "django_rq", ".", "get_queue", "(", "'high'", ")", "queue", ".", "enqueue", "(", "__bulk_labeling_by_whois_relation_aux__", ",", "username", ",", "analysis_session_id", ",", "domain", ",", "verdict", ")" ]
https://github.com/stratosphereips/Manati/blob/20e55d49edf00f8503807c62397d02a0dad9ddff/manati/api_manager/core/modules_manager.py#L301-L306
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/windows_virtual_machine.py
python
BaseWindowsMixin.WaitForBootCompletion
(self)
Waits until the VM has booted.
Waits until the VM has booted.
[ "Waits", "until", "the", "VM", "has", "booted", "." ]
def WaitForBootCompletion(self): """Waits until the VM has booted.""" to_wait_for = [self._WaitForWinRmCommand] if FLAGS.cluster_boot_test_rdp_port_listening: to_wait_for.append(self._WaitForRdpPort) vm_util.RunParallelThreads([(method, [], {}) for method in to_wait_for], 2)
[ "def", "WaitForBootCompletion", "(", "self", ")", ":", "to_wait_for", "=", "[", "self", ".", "_WaitForWinRmCommand", "]", "if", "FLAGS", ".", "cluster_boot_test_rdp_port_listening", ":", "to_wait_for", ".", "append", "(", "self", ".", "_WaitForRdpPort", ")", "vm_util", ".", "RunParallelThreads", "(", "[", "(", "method", ",", "[", "]", ",", "{", "}", ")", "for", "method", "in", "to_wait_for", "]", ",", "2", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/windows_virtual_machine.py#L410-L415
WerWolv/EdiZon_CheatsConfigsAndScripts
d16d36c7509c01dca770f402babd83ff2e9ae6e7
Scripts/lib/python3.5/email/headerregistry.py
python
HeaderRegistry.map_to_type
(self, name, cls)
Register cls as the specialized class for handling "name" headers.
Register cls as the specialized class for handling "name" headers.
[ "Register", "cls", "as", "the", "specialized", "class", "for", "handling", "name", "headers", "." ]
def map_to_type(self, name, cls): """Register cls as the specialized class for handling "name" headers. """ self.registry[name.lower()] = cls
[ "def", "map_to_type", "(", "self", ",", "name", ",", "cls", ")", ":", "self", ".", "registry", "[", "name", ".", "lower", "(", ")", "]", "=", "cls" ]
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/email/headerregistry.py#L566-L570
facebookresearch/ParlAI
e4d59c30eef44f1f67105961b82a83fd28d7d78b
parlai/core/teachers.py
python
ChunkTeacher._setup_data
(self, datatype)
Passthrough.
Passthrough.
[ "Passthrough", "." ]
def _setup_data(self, datatype): """ Passthrough. """ pass
[ "def", "_setup_data", "(", "self", ",", "datatype", ")", ":", "pass" ]
https://github.com/facebookresearch/ParlAI/blob/e4d59c30eef44f1f67105961b82a83fd28d7d78b/parlai/core/teachers.py#L2395-L2399
vtr0n/TelegramTUI
a64f910971187d0d9f4192fb2ad369599d9d87c5
telegramtui/src/npyscreen/proto_fm_screen_area.py
python
ScreenArea._max_physical
(self)
return (mxy-1, mxx-1)
How big is the physical screen?
How big is the physical screen?
[ "How", "big", "is", "the", "physical", "screen?" ]
def _max_physical(self): "How big is the physical screen?" # On OS X newwin does not correctly get the size of the screen. # let's see how big we could be: create a temp screen # and see the size curses makes it. No good to keep, though try: mxy, mxx = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, 'xxxx')) if (mxy, mxx) == (0,0): raise ValueError except (ValueError, NameError): mxy, mxx = curses.newwin(0,0).getmaxyx() # return safe values, i.e. slightly smaller. return (mxy-1, mxx-1)
[ "def", "_max_physical", "(", "self", ")", ":", "# On OS X newwin does not correctly get the size of the screen.", "# let's see how big we could be: create a temp screen", "# and see the size curses makes it. No good to keep, though", "try", ":", "mxy", ",", "mxx", "=", "struct", ".", "unpack", "(", "'hh'", ",", "fcntl", ".", "ioctl", "(", "sys", ".", "stderr", ".", "fileno", "(", ")", ",", "termios", ".", "TIOCGWINSZ", ",", "'xxxx'", ")", ")", "if", "(", "mxy", ",", "mxx", ")", "==", "(", "0", ",", "0", ")", ":", "raise", "ValueError", "except", "(", "ValueError", ",", "NameError", ")", ":", "mxy", ",", "mxx", "=", "curses", ".", "newwin", "(", "0", ",", "0", ")", ".", "getmaxyx", "(", ")", "# return safe values, i.e. slightly smaller.", "return", "(", "mxy", "-", "1", ",", "mxx", "-", "1", ")" ]
https://github.com/vtr0n/TelegramTUI/blob/a64f910971187d0d9f4192fb2ad369599d9d87c5/telegramtui/src/npyscreen/proto_fm_screen_area.py#L111-L124
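The TIOCGWINSZ query above also works standalone; a minimal sketch (on Python 3.3+, shutil.get_terminal_size is the portable stdlib alternative):

import fcntl, struct, sys, termios

def terminal_size(fd=None):
    # Ask the terminal driver for (rows, cols), as _max_physical does.
    if fd is None:
        fd = sys.stdout.fileno()
    rows, cols = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\x00' * 4))
    return rows, cols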
VirtueSecurity/aws-extender
d123b7e1a845847709ba3a481f11996bddc68a1c
BappModules/boto/redshift/layer1.py
python
RedshiftConnection.modify_cluster
(self, cluster_identifier, cluster_type=None, node_type=None, number_of_nodes=None, cluster_security_groups=None, vpc_security_group_ids=None, master_user_password=None, cluster_parameter_group_name=None, automated_snapshot_retention_period=None, preferred_maintenance_window=None, cluster_version=None, allow_version_upgrade=None, hsm_client_certificate_identifier=None, hsm_configuration_identifier=None, new_cluster_identifier=None)
return self._make_request( action='ModifyCluster', verb='POST', path='/', params=params)
Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to `Amazon Redshift Clusters`_ in the Amazon Redshift Management Guide You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change. If you specify the same number of nodes and node type that are already configured for the cluster, an error is returned. :type cluster_identifier: string :param cluster_identifier: The unique identifier of the cluster to be modified. Example: `examplecluster` :type cluster_type: string :param cluster_type: The new cluster type. When you submit your cluster resize request, your existing cluster goes into a read-only mode. After Amazon Redshift provisions a new cluster based on your resize requirements, there will be outage for a period while the old cluster is deleted and your connection is switched to the new cluster. You can use DescribeResize to track the progress of the resize request. Valid Values: ` multi-node | single-node ` :type node_type: string :param node_type: The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter also. When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use the DescribeResize to track the progress of the resize request. Valid Values: ` dw1.xlarge` | `dw1.8xlarge` | `dw2.large` | `dw2.8xlarge`. :type number_of_nodes: integer :param number_of_nodes: The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter also. When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request. Valid Values: Integer greater than `0`. :type cluster_security_groups: list :param cluster_security_groups: A list of cluster security groups to be authorized on this cluster. This change is asynchronously applied as soon as possible. Security groups currently associated with the cluster, and not in the list of groups to apply, will be revoked from the cluster. Constraints: + Must be 1 to 255 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type vpc_security_group_ids: list :param vpc_security_group_ids: A list of virtual private cloud (VPC) security groups to be associated with the cluster. 
:type master_user_password: string :param master_user_password: The new password for the cluster master user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the `MasterUserPassword` element exists in the `PendingModifiedValues` element of the operation response. Default: Uses existing setting. Constraints: + Must be between 8 and 64 characters in length. + Must contain at least one uppercase letter. + Must contain at least one lowercase letter. + Must contain one number. + Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), " (double quote), \, /, @, or space. :type cluster_parameter_group_name: string :param cluster_parameter_group_name: The name of the cluster parameter group to apply to this cluster. This change is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster. Default: Uses existing setting. Constraints: The cluster parameter group must be in the same parameter group family that matches the cluster version. :type automated_snapshot_retention_period: integer :param automated_snapshot_retention_period: The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot. If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted. Default: Uses existing setting. Constraints: Must be a value from 0 to 35. :type preferred_maintenance_window: string :param preferred_maintenance_window: The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage. This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied. Default: Uses existing setting. Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes. :type cluster_version: string :param cluster_version: The new version number of the Amazon Redshift engine to upgrade to. For major version upgrades, if a non-default cluster parameter group is currently in use, a new cluster parameter group in the cluster parameter group family for the new version must be specified. The new cluster parameter group can be the default for that cluster parameter group family. For more information about managing parameter groups, go to `Amazon Redshift Parameter Groups`_ in the Amazon Redshift Management Guide . Example: `1.0` :type allow_version_upgrade: boolean :param allow_version_upgrade: If `True`, upgrades will be applied automatically to the cluster during the maintenance window. Default: `False` :type hsm_client_certificate_identifier: string :param hsm_client_certificate_identifier: Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM. :type hsm_configuration_identifier: string :param hsm_configuration_identifier: Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM. 
:type new_cluster_identifier: string :param new_cluster_identifier: The new identifier for the cluster. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens. + Alphabetic characters must be lowercase. + First character must be a letter. + Cannot end with a hyphen or contain two consecutive hyphens. + Must be unique for all clusters within an AWS account. Example: `examplecluster`
Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to `Amazon Redshift Clusters`_ in the Amazon Redshift Management Guide
[ "Modifies", "the", "settings", "for", "a", "cluster", ".", "For", "example", "you", "can", "add", "another", "security", "or", "parameter", "group", "update", "the", "preferred", "maintenance", "window", "or", "change", "the", "master", "user", "password", ".", "Resetting", "a", "cluster", "password", "or", "modifying", "the", "security", "groups", "associated", "with", "a", "cluster", "do", "not", "need", "a", "reboot", ".", "However", "modifying", "a", "parameter", "group", "requires", "a", "reboot", "for", "parameters", "to", "take", "effect", ".", "For", "more", "information", "about", "managing", "clusters", "go", "to", "Amazon", "Redshift", "Clusters", "_", "in", "the", "Amazon", "Redshift", "Management", "Guide" ]
def modify_cluster(self, cluster_identifier, cluster_type=None, node_type=None, number_of_nodes=None, cluster_security_groups=None, vpc_security_group_ids=None, master_user_password=None, cluster_parameter_group_name=None, automated_snapshot_retention_period=None, preferred_maintenance_window=None, cluster_version=None, allow_version_upgrade=None, hsm_client_certificate_identifier=None, hsm_configuration_identifier=None, new_cluster_identifier=None): """ Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to `Amazon Redshift Clusters`_ in the Amazon Redshift Management Guide You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change. If you specify the same number of nodes and node type that are already configured for the cluster, an error is returned. :type cluster_identifier: string :param cluster_identifier: The unique identifier of the cluster to be modified. Example: `examplecluster` :type cluster_type: string :param cluster_type: The new cluster type. When you submit your cluster resize request, your existing cluster goes into a read-only mode. After Amazon Redshift provisions a new cluster based on your resize requirements, there will be outage for a period while the old cluster is deleted and your connection is switched to the new cluster. You can use DescribeResize to track the progress of the resize request. Valid Values: ` multi-node | single-node ` :type node_type: string :param node_type: The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter also. When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use the DescribeResize to track the progress of the resize request. Valid Values: ` dw1.xlarge` | `dw1.8xlarge` | `dw2.large` | `dw2.8xlarge`. :type number_of_nodes: integer :param number_of_nodes: The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter also. When you submit your request to resize a cluster, Amazon Redshift sets access permissions for the cluster to read-only. After Amazon Redshift provisions a new cluster according to your resize requirements, there will be a temporary outage while the old cluster is deleted and your connection is switched to the new cluster. When the new connection is complete, the original access permissions for the cluster are restored. You can use DescribeResize to track the progress of the resize request. Valid Values: Integer greater than `0`. :type cluster_security_groups: list :param cluster_security_groups: A list of cluster security groups to be authorized on this cluster. 
This change is asynchronously applied as soon as possible. Security groups currently associated with the cluster, and not in the list of groups to apply, will be revoked from the cluster. Constraints: + Must be 1 to 255 alphanumeric characters or hyphens + First character must be a letter + Cannot end with a hyphen or contain two consecutive hyphens :type vpc_security_group_ids: list :param vpc_security_group_ids: A list of virtual private cloud (VPC) security groups to be associated with the cluster. :type master_user_password: string :param master_user_password: The new password for the cluster master user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the `MasterUserPassword` element exists in the `PendingModifiedValues` element of the operation response. Default: Uses existing setting. Constraints: + Must be between 8 and 64 characters in length. + Must contain at least one uppercase letter. + Must contain at least one lowercase letter. + Must contain one number. + Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), " (double quote), \, /, @, or space. :type cluster_parameter_group_name: string :param cluster_parameter_group_name: The name of the cluster parameter group to apply to this cluster. This change is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster. Default: Uses existing setting. Constraints: The cluster parameter group must be in the same parameter group family that matches the cluster version. :type automated_snapshot_retention_period: integer :param automated_snapshot_retention_period: The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot. If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted. Default: Uses existing setting. Constraints: Must be a value from 0 to 35. :type preferred_maintenance_window: string :param preferred_maintenance_window: The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage. This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied. Default: Uses existing setting. Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes. :type cluster_version: string :param cluster_version: The new version number of the Amazon Redshift engine to upgrade to. For major version upgrades, if a non-default cluster parameter group is currently in use, a new cluster parameter group in the cluster parameter group family for the new version must be specified. The new cluster parameter group can be the default for that cluster parameter group family. For more information about managing parameter groups, go to `Amazon Redshift Parameter Groups`_ in the Amazon Redshift Management Guide . 
Example: `1.0` :type allow_version_upgrade: boolean :param allow_version_upgrade: If `True`, upgrades will be applied automatically to the cluster during the maintenance window. Default: `False` :type hsm_client_certificate_identifier: string :param hsm_client_certificate_identifier: Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM. :type hsm_configuration_identifier: string :param hsm_configuration_identifier: Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM. :type new_cluster_identifier: string :param new_cluster_identifier: The new identifier for the cluster. Constraints: + Must contain from 1 to 63 alphanumeric characters or hyphens. + Alphabetic characters must be lowercase. + First character must be a letter. + Cannot end with a hyphen or contain two consecutive hyphens. + Must be unique for all clusters within an AWS account. Example: `examplecluster` """ params = {'ClusterIdentifier': cluster_identifier, } if cluster_type is not None: params['ClusterType'] = cluster_type if node_type is not None: params['NodeType'] = node_type if number_of_nodes is not None: params['NumberOfNodes'] = number_of_nodes if cluster_security_groups is not None: self.build_list_params(params, cluster_security_groups, 'ClusterSecurityGroups.member') if vpc_security_group_ids is not None: self.build_list_params(params, vpc_security_group_ids, 'VpcSecurityGroupIds.member') if master_user_password is not None: params['MasterUserPassword'] = master_user_password if cluster_parameter_group_name is not None: params['ClusterParameterGroupName'] = cluster_parameter_group_name if automated_snapshot_retention_period is not None: params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period if preferred_maintenance_window is not None: params['PreferredMaintenanceWindow'] = preferred_maintenance_window if cluster_version is not None: params['ClusterVersion'] = cluster_version if allow_version_upgrade is not None: params['AllowVersionUpgrade'] = str( allow_version_upgrade).lower() if hsm_client_certificate_identifier is not None: params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier if hsm_configuration_identifier is not None: params['HsmConfigurationIdentifier'] = hsm_configuration_identifier if new_cluster_identifier is not None: params['NewClusterIdentifier'] = new_cluster_identifier return self._make_request( action='ModifyCluster', verb='POST', path='/', params=params)
[ "def", "modify_cluster", "(", "self", ",", "cluster_identifier", ",", "cluster_type", "=", "None", ",", "node_type", "=", "None", ",", "number_of_nodes", "=", "None", ",", "cluster_security_groups", "=", "None", ",", "vpc_security_group_ids", "=", "None", ",", "master_user_password", "=", "None", ",", "cluster_parameter_group_name", "=", "None", ",", "automated_snapshot_retention_period", "=", "None", ",", "preferred_maintenance_window", "=", "None", ",", "cluster_version", "=", "None", ",", "allow_version_upgrade", "=", "None", ",", "hsm_client_certificate_identifier", "=", "None", ",", "hsm_configuration_identifier", "=", "None", ",", "new_cluster_identifier", "=", "None", ")", ":", "params", "=", "{", "'ClusterIdentifier'", ":", "cluster_identifier", ",", "}", "if", "cluster_type", "is", "not", "None", ":", "params", "[", "'ClusterType'", "]", "=", "cluster_type", "if", "node_type", "is", "not", "None", ":", "params", "[", "'NodeType'", "]", "=", "node_type", "if", "number_of_nodes", "is", "not", "None", ":", "params", "[", "'NumberOfNodes'", "]", "=", "number_of_nodes", "if", "cluster_security_groups", "is", "not", "None", ":", "self", ".", "build_list_params", "(", "params", ",", "cluster_security_groups", ",", "'ClusterSecurityGroups.member'", ")", "if", "vpc_security_group_ids", "is", "not", "None", ":", "self", ".", "build_list_params", "(", "params", ",", "vpc_security_group_ids", ",", "'VpcSecurityGroupIds.member'", ")", "if", "master_user_password", "is", "not", "None", ":", "params", "[", "'MasterUserPassword'", "]", "=", "master_user_password", "if", "cluster_parameter_group_name", "is", "not", "None", ":", "params", "[", "'ClusterParameterGroupName'", "]", "=", "cluster_parameter_group_name", "if", "automated_snapshot_retention_period", "is", "not", "None", ":", "params", "[", "'AutomatedSnapshotRetentionPeriod'", "]", "=", "automated_snapshot_retention_period", "if", "preferred_maintenance_window", "is", "not", "None", ":", "params", "[", "'PreferredMaintenanceWindow'", "]", "=", "preferred_maintenance_window", "if", "cluster_version", "is", "not", "None", ":", "params", "[", "'ClusterVersion'", "]", "=", "cluster_version", "if", "allow_version_upgrade", "is", "not", "None", ":", "params", "[", "'AllowVersionUpgrade'", "]", "=", "str", "(", "allow_version_upgrade", ")", ".", "lower", "(", ")", "if", "hsm_client_certificate_identifier", "is", "not", "None", ":", "params", "[", "'HsmClientCertificateIdentifier'", "]", "=", "hsm_client_certificate_identifier", "if", "hsm_configuration_identifier", "is", "not", "None", ":", "params", "[", "'HsmConfigurationIdentifier'", "]", "=", "hsm_configuration_identifier", "if", "new_cluster_identifier", "is", "not", "None", ":", "params", "[", "'NewClusterIdentifier'", "]", "=", "new_cluster_identifier", "return", "self", ".", "_make_request", "(", "action", "=", "'ModifyCluster'", ",", "verb", "=", "'POST'", ",", "path", "=", "'/'", ",", "params", "=", "params", ")" ]
https://github.com/VirtueSecurity/aws-extender/blob/d123b7e1a845847709ba3a481f11996bddc68a1c/BappModules/boto/redshift/layer1.py#L2252-L2489
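A hedged boto 2 usage sketch (region and identifiers are illustrative): when resizing, pass both node_type and number_of_nodes, as the docstring requires even if only one of them changes.

import boto.redshift

conn = boto.redshift.connect_to_region('us-east-1')
conn.modify_cluster('examplecluster',
                    cluster_type='multi-node',
                    node_type='dw2.large',
                    number_of_nodes=4)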
yianjiajia/django_web_ansible
1103343082a65abf9d37310f5048514d74930753
devops/apps/ansible/elfinder/volumes/base.py
python
ElfinderVolumeDriver._mimetype
(self, path)
Attempt to read the file's mimetype. Should return ``None`` on fail. .. warning:: **Not implemented**, each driver must provide its own implementation.
Attempt to read the file's mimetype. Should return ``None`` on fail. .. warning:: **Not implemented**, each driver must provide its own implementation.
[ "Attempt", "to", "read", "the", "file", "s", "mimetype", ".", "Should", "return", "None", "on", "fail", ".", "..", "warning", "::", "**", "Not", "implemented", "**", "each", "driver", "must", "provide", "its", "own", "implementation", "." ]
def _mimetype(self, path): """ Attempt to read the file's mimetype. Should return ``None`` on fail. .. warning:: **Not implemented**, each driver must provide its own implementation. """ raise NotImplementedError
[ "def", "_mimetype", "(", "self", ",", "path", ")", ":", "raise", "NotImplementedError" ]
https://github.com/yianjiajia/django_web_ansible/blob/1103343082a65abf9d37310f5048514d74930753/devops/apps/ansible/elfinder/volumes/base.py#L1931-L1941
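One plausible subclass implementation, a sketch using the stdlib rather than elfinder's actual drivers: guess from the file extension and return None on failure, matching the contract above.

import mimetypes

def _mimetype(self, path):
    # mimetypes.guess_type returns (None, None) when it cannot guess.
    guessed, _encoding = mimetypes.guess_type(path)
    return guessed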
datactive/bigbang
d4fef7eb41ae04e51f4e369de5a721c66231202b
bigbang/analysis/graph.py
python
ascendancy
(am)
return A
Ulanowicz ecosystem health measures. Input is a weighted adjacency matrix.
Ulanowicz ecosystem health measures. Input is a weighted adjacency matrix.
[ "Ulanowicz", "ecosystem", "health", "measures", ".", "Input", "is", "a", "weighted", "adjacency", "matrix", "." ]
def ascendancy(am): """ Ulanowicz ecosystem health measures. Input is a weighted adjacency matrix. """ # should these be normalized?!?! # output rates s0 = np.tile(np.sum(am, 0).T, (am.shape[0], 1)) # input rates s1 = np.tile(np.sum(am, 1).T, (am.shape[1], 1)).T logs = np.nan_to_num(np.log(am * np.sum(am) / (s0 * s1))) # ascendancy! A = np.sum(am * logs) return A
[ "def", "ascendancy", "(", "am", ")", ":", "# should these be normalized?!?!", "# output rates", "s0", "=", "np", ".", "tile", "(", "np", ".", "sum", "(", "am", ",", "0", ")", ".", "T", ",", "(", "am", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "# input rates", "s1", "=", "np", ".", "tile", "(", "np", ".", "sum", "(", "am", ",", "1", ")", ".", "T", ",", "(", "am", ".", "shape", "[", "1", "]", ",", "1", ")", ")", ".", "T", "logs", "=", "np", ".", "nan_to_num", "(", "np", ".", "log", "(", "am", "*", "np", ".", "sum", "(", "am", ")", "/", "(", "s0", "*", "s1", ")", ")", ")", "# ascendancy!", "A", "=", "np", ".", "sum", "(", "am", "*", "logs", ")", "return", "A" ]
https://github.com/datactive/bigbang/blob/d4fef7eb41ae04e51f4e369de5a721c66231202b/bigbang/analysis/graph.py#L113-L129
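A tiny worked example with the ascendancy function above in scope (numpy warns about log(0) for the zero entries; nan_to_num absorbs the resulting -inf before it is multiplied by the zero flows):

import numpy as np

am = np.array([[0.0, 4.0],
               [1.0, 0.0]])
# A = 4*ln(4*5/(4*4)) + 1*ln(1*5/(1*1)) = 4*ln(1.25) + ln(5)
print(ascendancy(am))  # ~2.502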
DylanWusee/pointconv
f39dc3e101af2f52544181ee20c14f73279b48ae
utils/provider.py
python
random_scale_point_cloud
(batch_data, scale_low=0.8, scale_high=1.25)
return batch_data
Randomly scale the point cloud. Scale is per point cloud. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, scaled batch of point clouds
Randomly scale the point cloud. Scale is per point cloud. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, scaled batch of point clouds
[ "Randomly", "scale", "the", "point", "cloud", ".", "Scale", "is", "per", "point", "cloud", ".", "Input", ":", "BxNx3", "array", "original", "batch", "of", "point", "clouds", "Return", ":", "BxNx3", "array", "scaled", "batch", "of", "point", "clouds" ]
def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): """ Randomly scale the point cloud. Scale is per point cloud. Input: BxNx3 array, original batch of point clouds Return: BxNx3 array, scaled batch of point clouds """ B, N, C = batch_data.shape scales = np.random.uniform(scale_low, scale_high, B) for batch_index in range(B): batch_data[batch_index,:,:] *= scales[batch_index] return batch_data
[ "def", "random_scale_point_cloud", "(", "batch_data", ",", "scale_low", "=", "0.8", ",", "scale_high", "=", "1.25", ")", ":", "B", ",", "N", ",", "C", "=", "batch_data", ".", "shape", "scales", "=", "np", ".", "random", ".", "uniform", "(", "scale_low", ",", "scale_high", ",", "B", ")", "for", "batch_index", "in", "range", "(", "B", ")", ":", "batch_data", "[", "batch_index", ",", ":", ",", ":", "]", "*=", "scales", "[", "batch_index", "]", "return", "batch_data" ]
https://github.com/DylanWusee/pointconv/blob/f39dc3e101af2f52544181ee20c14f73279b48ae/utils/provider.py#L152-L163
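Usage sketch: because the scale is a single factor per cloud, the ratio of Frobenius norms before and after recovers it exactly (the function mutates its input, hence the copy).

import numpy as np

batch = np.random.randn(8, 1024, 3).astype(np.float32)
scaled = random_scale_point_cloud(batch.copy(), scale_low=0.8, scale_high=1.25)
ratios = np.linalg.norm(scaled, axis=(1, 2)) / np.linalg.norm(batch, axis=(1, 2))
assert np.all((ratios >= 0.8 - 1e-5) & (ratios <= 1.25 + 1e-5))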
mailpile/Mailpile
b5e4b85fd1e584951d6d13af362ab28821466eea
mailpile/config/validators.py
python
HostNameValid
(host)
Tests whether a string is a valid host-name, returning a boolean value >>> HostNameValid("127.0.0.1") True >>> HostNameValid("::1") True >>> HostNameValid("localhost") True >>> HostNameValid("22.45") False
Tests whether a string is a valid host-name, returning a boolean value
[ "Tests", "whether", "a", "string", "is", "a", "valid", "host", "-", "name", "returning", "a", "boolean", "value" ]
def HostNameValid(host): """ Tests whether a string is a valid host-name, returning a boolean value >>> HostNameValid("127.0.0.1") True >>> HostNameValid("::1") True >>> HostNameValid("localhost") True >>> HostNameValid("22.45") False """ valid = False for attr in ["AF_INET","AF_INET6"]: try: socket.inet_pton(socket.__getattribute__(attr), host) valid = True break except (socket.error): pass if not valid: # the host is not an IP so check if its a hostname i.e. 'localhost' or 'site.com' if not host or (not DnsNameValid(host) and not ALPHA_RE.match(host)): return False else: return True else: return True
[ "def", "HostNameValid", "(", "host", ")", ":", "valid", "=", "False", "for", "attr", "in", "[", "\"AF_INET\"", ",", "\"AF_INET6\"", "]", ":", "try", ":", "socket", ".", "inet_pton", "(", "socket", ".", "__getattribute__", "(", "attr", ")", ",", "host", ")", "valid", "=", "True", "break", "except", "(", "socket", ".", "error", ")", ":", "pass", "if", "not", "valid", ":", "# the host is not an IP so check if its a hostname i.e. 'localhost' or 'site.com'", "if", "not", "host", "or", "(", "not", "DnsNameValid", "(", "host", ")", "and", "not", "ALPHA_RE", ".", "match", "(", "host", ")", ")", ":", "return", "False", "else", ":", "return", "True", "else", ":", "return", "True" ]
https://github.com/mailpile/Mailpile/blob/b5e4b85fd1e584951d6d13af362ab28821466eea/mailpile/config/validators.py#L104-L135
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/filedb/gae.py
python
DatastoreFile.read
(self, length)
return self.data.read(length)
[]
def read(self, length): return self.data.read(length)
[ "def", "read", "(", "self", ",", "length", ")", ":", "return", "self", ".", "data", ".", "read", "(", "length", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/filedb/gae.py#L69-L70
NervanaSystems/neon
8c3fb8a93b4a89303467b25817c60536542d08bd
neon/backends/nervanacpu.py
python
CPUTensor.raw
(self)
return self._tensor.ctypes.data
Access the raw buffer. Returns: pointer: A device specific pointer
Access the raw buffer.
[ "Access", "the", "raw", "buffer", "." ]
def raw(self): """ Access the raw buffer. Returns: pointer: A device specific pointer """ return self._tensor.ctypes.data
[ "def", "raw", "(", "self", ")", ":", "return", "self", ".", "_tensor", ".", "ctypes", ".", "data" ]
https://github.com/NervanaSystems/neon/blob/8c3fb8a93b4a89303467b25817c60536542d08bd/neon/backends/nervanacpu.py#L261-L268
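The property simply exposes numpy's ctypes.data; the same address is usable from plain numpy when handing buffers to C code (a sketch, not neon-specific API):

import ctypes
import numpy as np

a = np.arange(4, dtype=np.float32)
ptr = a.ctypes.data  # integer address of the first element
p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
assert p[0] == a[0]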
googlecolab/colabtools
b7b3566363e2f0bad9ad256ae317314ba5bd42f8
google/colab/html/_background_server.py
python
_BackgroundServer.__init__
(self, app)
Initialize (but do not start) background server. Args: app: server application to run.
Initialize (but do not start) background server.
[ "Initialize", "(", "but", "do", "not", "start", ")", "background", "server", "." ]
def __init__(self, app): """Initialize (but do not start) background server. Args: app: server application to run. """ self._app = app # These will be initialized when starting the server. self._port = None self._server_thread = None self._ioloop = None self._server = None
[ "def", "__init__", "(", "self", ",", "app", ")", ":", "self", ".", "_app", "=", "app", "# These will be initialized when starting the server.", "self", ".", "_port", "=", "None", "self", ".", "_server_thread", "=", "None", "self", ".", "_ioloop", "=", "None", "self", ".", "_server", "=", "None" ]
https://github.com/googlecolab/colabtools/blob/b7b3566363e2f0bad9ad256ae317314ba5bd42f8/google/colab/html/_background_server.py#L32-L44
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1_typed_local_object_reference.py
python
V1TypedLocalObjectReference.to_str
(self)
return pprint.pformat(self.to_dict())
Returns the string representation of the model
Returns the string representation of the model
[ "Returns", "the", "string", "representation", "of", "the", "model" ]
def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
[ "def", "to_str", "(", "self", ")", ":", "return", "pprint", ".", "pformat", "(", "self", ".", "to_dict", "(", ")", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_typed_local_object_reference.py#L160-L162
gnuradio/pybombs
17044241bf835b93571026b112f179f2db7448a4
pybombs/fetchers/wget.py
python
Wget.update_src
(self, src, dest, dirname, args=None)
return self.fetch_url(src, dest, dirname, args)
For an update, we grab the archive and copy it over into the existing directory. Luckily, that's exactly the same as fetch_url().
For an update, we grab the archive and copy it over into the existing directory. Luckily, that's exactly the same as fetch_url().
[ "For", "an", "update", "we", "grab", "the", "archive", "and", "copy", "it", "over", "into", "the", "existing", "directory", ".", "Luckily", "that", "s", "exactly", "the", "same", "as", "fetch_url", "()", "." ]
def update_src(self, src, dest, dirname, args=None): """ For an update, we grab the archive and copy it over into the existing directory. Luckily, that's exactly the same as fetch_url(). """ return self.fetch_url(src, dest, dirname, args)
[ "def", "update_src", "(", "self", ",", "src", ",", "dest", ",", "dirname", ",", "args", "=", "None", ")", ":", "return", "self", ".", "fetch_url", "(", "src", ",", "dest", ",", "dirname", ",", "args", ")" ]
https://github.com/gnuradio/pybombs/blob/17044241bf835b93571026b112f179f2db7448a4/pybombs/fetchers/wget.py#L144-L149
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/volume/_colorbar.py
python
ColorBar.thickness
(self)
return self["thickness"]
Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. The 'thickness' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float
Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. The 'thickness' property is a number and may be specified as: - An int or float in the interval [0, inf]
[ "Sets", "the", "thickness", "of", "the", "color", "bar", "This", "measure", "excludes", "the", "size", "of", "the", "padding", "ticks", "and", "labels", ".", "The", "thickness", "property", "is", "a", "number", "and", "may", "be", "specified", "as", ":", "-", "An", "int", "or", "float", "in", "the", "interval", "[", "0", "inf", "]" ]
def thickness(self): """ Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. The 'thickness' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["thickness"]
[ "def", "thickness", "(", "self", ")", ":", "return", "self", "[", "\"thickness\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/volume/_colorbar.py#L564-L576
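A hedged plotly sketch: a minimal volume trace whose color bar is widened via the thickness property documented above (the data is synthetic).

import numpy as np
import plotly.graph_objects as go

X, Y, Z = np.mgrid[-1:1:10j, -1:1:10j, -1:1:10j]
fig = go.Figure(go.Volume(
    x=X.flatten(), y=Y.flatten(), z=Z.flatten(),
    value=np.sin(X * Y * Z).flatten(),
    colorbar=dict(thickness=40),  # 40px-wide color bar
))
fig.show()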
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/pkg_resources/__init__.py
python
WorkingSet.require
(self, *requirements)
return needed
Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set.
Ensure that distributions matching `requirements` are activated
[ "Ensure", "that", "distributions", "matching", "requirements", "are", "activated" ]
def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed
[ "def", "require", "(", "self", ",", "*", "requirements", ")", ":", "needed", "=", "self", ".", "resolve", "(", "parse_requirements", "(", "requirements", ")", ")", "for", "dist", "in", "needed", ":", "self", ".", "add", "(", "dist", ")", "return", "needed" ]
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/pkg_resources/__init__.py#L889-L903
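Usage sketch against the global working set (the distribution name is illustrative; require raises DistributionNotFound if it is not installed):

import pkg_resources

activated = pkg_resources.working_set.require('setuptools')
for dist in activated:
    print(dist.project_name, dist.version)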
dropbox/changes
37e23c3141b75e4785cf398d015e3dbca41bdd56
changes/vcs/base.py
python
Vcs._construct_subprocess
(self, *args, **kwargs)
return proc
Construct a subprocess with the correct arguments and environment
Construct a subprocess with the correct arguments and environment
[ "Construct", "a", "subprocess", "with", "the", "correct", "arguments", "and", "environment" ]
def _construct_subprocess(self, *args, **kwargs): # type: (*Any, **Any) -> Popen """Construct a subprocess with the correct arguments and environment""" env = os.environ.copy() for key, value in self.get_default_env().iteritems(): env.setdefault(key, value) env.setdefault('CHANGES_SSH_REPO', self.url) for key, value in kwargs.pop('env', {}): env[key] = value kwargs['env'] = env kwargs['close_fds'] = True kwargs.setdefault('stdout', PIPE) kwargs.setdefault('stderr', PIPE) kwargs.setdefault('stdin', PIPE) proc = Popen(*args, **kwargs) return proc
[ "def", "_construct_subprocess", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# type: (*Any, **Any) -> Popen", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "self", ".", "get_default_env", "(", ")", ".", "iteritems", "(", ")", ":", "env", ".", "setdefault", "(", "key", ",", "value", ")", "env", ".", "setdefault", "(", "'CHANGES_SSH_REPO'", ",", "self", ".", "url", ")", "for", "key", ",", "value", "in", "kwargs", ".", "pop", "(", "'env'", ",", "{", "}", ")", ":", "env", "[", "key", "]", "=", "value", "kwargs", "[", "'env'", "]", "=", "env", "kwargs", "[", "'close_fds'", "]", "=", "True", "kwargs", ".", "setdefault", "(", "'stdout'", ",", "PIPE", ")", "kwargs", ".", "setdefault", "(", "'stderr'", ",", "PIPE", ")", "kwargs", ".", "setdefault", "(", "'stdin'", ",", "PIPE", ")", "proc", "=", "Popen", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "proc" ]
https://github.com/dropbox/changes/blob/37e23c3141b75e4785cf398d015e3dbca41bdd56/changes/vcs/base.py#L127-L148
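A standalone sketch of the same environment-merging pattern: copy os.environ, layer defaults underneath with setdefault, then let explicit overrides win before spawning.

import os
from subprocess import PIPE, Popen

def run_with_env(cmd, defaults=None, overrides=None):
    env = os.environ.copy()
    for key, value in (defaults or {}).items():
        env.setdefault(key, value)   # defaults never clobber the caller's env
    env.update(overrides or {})      # explicit overrides always win
    return Popen(cmd, env=env, stdout=PIPE, stderr=PIPE, close_fds=True)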
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/oauth2client-4.1.3/oauth2client/_helpers.py
python
scopes_to_string
(scopes)
Converts scope value to a string. If scopes is a string then it is simply passed through. If scopes is an iterable then a string is returned that is all the individual scopes concatenated with spaces. Args: scopes: string or iterable of strings, the scopes. Returns: The scopes formatted as a single string.
Converts scope value to a string.
[ "Converts", "scope", "value", "to", "a", "string", "." ]
def scopes_to_string(scopes): """Converts scope value to a string. If scopes is a string then it is simply passed through. If scopes is an iterable then a string is returned that is all the individual scopes concatenated with spaces. Args: scopes: string or iterable of strings, the scopes. Returns: The scopes formatted as a single string. """ if isinstance(scopes, six.string_types): return scopes else: return ' '.join(scopes)
[ "def", "scopes_to_string", "(", "scopes", ")", ":", "if", "isinstance", "(", "scopes", ",", "six", ".", "string_types", ")", ":", "return", "scopes", "else", ":", "return", "' '", ".", "join", "(", "scopes", ")" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/oauth2client-4.1.3/oauth2client/_helpers.py#L143-L159
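A quick check of both accepted input shapes:

from oauth2client._helpers import scopes_to_string

assert scopes_to_string('email profile') == 'email profile'
assert scopes_to_string(['email', 'profile']) == 'email profile'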
stopstalk/stopstalk-deployment
10c3ab44c4ece33ae515f6888c15033db2004bb1
aws_lambda/spoj_aws_lambda_function/lambda_code/pkg_resources/__init__.py
python
_find_adapter
(registry, ob)
Return an adapter factory for `ob` from `registry`
Return an adapter factory for `ob` from `registry`
[ "Return", "an", "adapter", "factory", "for", "ob", "from", "registry" ]
def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) for t in types: if t in registry: return registry[t]
[ "def", "_find_adapter", "(", "registry", ",", "ob", ")", ":", "types", "=", "_always_object", "(", "inspect", ".", "getmro", "(", "getattr", "(", "ob", ",", "'__class__'", ",", "type", "(", "ob", ")", ")", ")", ")", "for", "t", "in", "types", ":", "if", "t", "in", "registry", ":", "return", "registry", "[", "t", "]" ]
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pkg_resources/__init__.py#L3023-L3028
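A sketch of the MRO walk (it imports a private pkg_resources helper, so treat it as illustrative only): the most specific registered type wins because inspect.getmro lists it first.

from pkg_resources import _find_adapter

class Base: pass
class Child(Base): pass

registry = {Base: 'base adapter', Child: 'child adapter'}
assert _find_adapter(registry, Child()) == 'child adapter'
assert _find_adapter(registry, Base()) == 'base adapter'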